path: root/arch/s390/mm
author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-07-23 14:57:57 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2013-08-22 06:20:06 -0400
commit	e509861105a3c1425f3f929bd631f88340b499bf (patch)
tree	0616b1c17c1f88dfb63a3bce0774a3e518f49119 /arch/s390/mm
parent	416fd0ffb14afead5b1feea14bbf33c2277942ef (diff)
s390/mm: cleanup page table definitions
Improve the encoding of the different pte types and the naming of the
page, segment table and region table bits. Due to the different pte
encoding the hugetlbfs primitives need to be adapted as well. To improve
compatibility with common code make the huge ptes use the encoding of
normal ptes. The conversion between the pte and pmd encoding for a huge
pte is done with set_huge_pte_at and huge_ptep_get.

Overall the code is now easier to understand.

Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
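As an illustration of the conversion described above, here is a minimal, standalone C sketch of the pte-to-pmd mapping idea used for huge ptes. The bit values and names below (SKETCH_PAGE_MASK, PTE_INVALID, PTE_PROTECT, SEG_INVALID, SEG_PROTECT) are simplified placeholders, not the real s390 bit layout; the authoritative implementation is __pte_to_pmd in the arch/s390/mm/hugetlbpage.c hunk further down.

/* sketch.c - illustration only; placeholder bit values, not the s390 layout */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_MASK  (~0xfffULL)  /* placeholder for PAGE_MASK */
#define PTE_PRESENT       0x001ULL     /* placeholder for _PAGE_PRESENT */
#define PTE_PROTECT       0x200ULL     /* placeholder for _PAGE_PROTECT */
#define PTE_INVALID       0x400ULL     /* placeholder for _PAGE_INVALID */
#define SEG_INVALID       0x020ULL     /* placeholder for _SEGMENT_ENTRY_INVALID */
#define SEG_PROTECT       0x200ULL     /* placeholder for _SEGMENT_ENTRY_PROTECT */

/* Mirror the idea of __pte_to_pmd: copy the origin, carry the invalid bit
 * over, and map both prot-none and read-only ptes to a protected segment
 * entry. Huge ptes are treated as dirty, so no dirty bit is carried. */
static uint64_t pte_to_pmd_sketch(uint64_t pte)
{
	uint64_t pmd = pte & SKETCH_PAGE_MASK;

	if (pte & PTE_INVALID)
		pmd |= SEG_INVALID;
	if ((pte & PTE_PROTECT) ||
	    ((pte & PTE_PRESENT) && (pte & PTE_INVALID)))
		pmd |= SEG_PROTECT;
	return pmd;
}

int main(void)
{
	/* a read-only, present pte pointing at a (placeholder) frame */
	uint64_t ro_pte = 0x12345000ULL | PTE_PRESENT | PTE_PROTECT;

	printf("pmd: %#llx\n", (unsigned long long) pte_to_pmd_sketch(ro_pte));
	return 0;
}

In the patch itself, set_huge_pte_at stores the converted segment entry and huge_ptep_get reverses the mapping via __pmd_to_pte, which is what lets common hugetlbfs code keep seeing ordinary pte encodings.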
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--	arch/s390/mm/dump_pagetables.c	18
-rw-r--r--	arch/s390/mm/gup.c	6
-rw-r--r--	arch/s390/mm/hugetlbpage.c	104
-rw-r--r--	arch/s390/mm/pageattr.c	2
-rw-r--r--	arch/s390/mm/pgtable.c	59
-rw-r--r--	arch/s390/mm/vmem.c	14
6 files changed, 146 insertions(+), 57 deletions(-)
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 3ad65b04ac15..46d517c3c763 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
 		seq_printf(m, "I\n");
 		return;
 	}
-	seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW ");
+	seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
 	seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
 	seq_putc(m, '\n');
 }
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
 }
 
 /*
- * The actual page table walker functions. In order to keep the implementation
- * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO
- * flags to note_page() if a region, segment or page table entry is invalid or
- * read-only.
- * After all it's just a hint that the current level being walked contains an
- * invalid or read-only entry.
+ * The actual page table walker functions. In order to keep the
+ * implementation of print_prot() short, we only check and pass
+ * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
+ * segment or page table entry is invalid or read-only.
+ * After all it's just a hint that the current level being walked
+ * contains an invalid or read-only entry.
  */
 static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 			   pmd_t *pmd, unsigned long addr)
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 	for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
 		st->current_address = addr;
 		pte = pte_offset_kernel(pmd, addr);
-		prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID);
+		prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
 		note_page(m, st, prot, 4);
 		addr += PAGE_SIZE;
 	}
 }
 
 #ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO)
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
 #else
 #define _PMD_PROT_MASK 0
 #endif
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 1f5315d1215c..5d758db27bdc 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	pte_t *ptep, pte;
 	struct page *page;
 
-	mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
 
 	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
 	do {
@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
 	struct page *head, *page, *tail;
 	int refs;
 
-	result = write ? 0 : _SEGMENT_ENTRY_RO;
-	mask = result | _SEGMENT_ENTRY_INV;
+	result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+	mask = result | _SEGMENT_ENTRY_INVALID;
 	if ((pmd_val(pmd) & mask) != result)
 		return 0;
 	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 121089d57802..b0bd0ae17796 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -8,21 +8,107 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 
+static inline pmd_t __pte_to_pmd(pte_t pte)
+{
+	int none, prot;
+	pmd_t pmd;
+
+	/*
+	 * Convert encoding		pte bits	pmd bits
+	 *				.IR.....wdtp	..R...I.....
+	 * empty			.10.....0000 -> ..0...1.....
+	 * prot-none, clean		.11.....0001 -> ..1...1.....
+	 * prot-none, dirty		.10.....0101 -> ..1...1.....
+	 * read-only, clean		.01.....0001 -> ..1...0.....
+	 * read-only, dirty		.01.....0101 -> ..1...0.....
+	 * read-write, clean		.01.....1001 -> ..0...0.....
+	 * read-write, dirty		.00.....1101 -> ..0...0.....
+	 * Huge ptes are dirty by definition, a clean pte is made dirty
+	 * by the conversion.
+	 */
+	if (pte_present(pte)) {
+		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
+		if (pte_val(pte) & _PAGE_INVALID)
+			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+		none = (pte_val(pte) & _PAGE_PRESENT) &&
+			(pte_val(pte) & _PAGE_INVALID);
+		prot = (pte_val(pte) & _PAGE_PROTECT);
+		if (prot || none)
+			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+	} else
+		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
+	return pmd;
+}
+
+static inline pte_t __pmd_to_pte(pmd_t pmd)
+{
+	pte_t pte;
+
+	/*
+	 * Convert encoding		pmd bits	pte bits
+	 *				..R...I.....	.IR.....wdtp
+	 * empty			..0...1..... -> .10.....0000
+	 * prot-none, young		..1...1..... -> .10.....0101
+	 * read-only, young		..1...0..... -> .01.....0101
+	 * read-write, young		..0...0..... -> .00.....1101
+	 * Huge ptes are dirty by definition
+	 */
+	if (pmd_present(pmd)) {
+		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
+			(pmd_val(pmd) & PAGE_MASK);
+		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
+			pte_val(pte) |= _PAGE_INVALID;
+		else {
+			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+				pte_val(pte) |= _PAGE_PROTECT;
+			else
+				pte_val(pte) |= _PAGE_WRITE;
+		}
+	} else
+		pte_val(pte) = _PAGE_INVALID;
+	return pte;
+}
 
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-		     pte_t *pteptr, pte_t pteval)
+		     pte_t *ptep, pte_t pte)
 {
-	pmd_t *pmdp = (pmd_t *) pteptr;
-	unsigned long mask;
+	pmd_t pmd;
 
+	pmd = __pte_to_pmd(pte);
 	if (!MACHINE_HAS_HPAGE) {
-		pteptr = (pte_t *) pte_page(pteval)[1].index;
-		mask = pte_val(pteval) &
-			(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
-		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) |= pte_page(pte)[1].index;
+	} else
+		pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
+	*(pmd_t *) ptep = pmd;
+}
+
+pte_t huge_ptep_get(pte_t *ptep)
+{
+	unsigned long origin;
+	pmd_t pmd;
+
+	pmd = *(pmd_t *) ptep;
+	if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
+		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+		pmd_val(pmd) |= *(unsigned long *) origin;
 	}
+	return __pmd_to_pte(pmd);
+}
 
-	pmd_val(*pmdp) = pte_val(pteval);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+			      unsigned long addr, pte_t *ptep)
+{
+	pmd_t *pmdp = (pmd_t *) ptep;
+	pte_t pte = huge_ptep_get(ptep);
+
+	if (MACHINE_HAS_IDTE)
+		__pmd_idte(addr, pmdp);
+	else
+		__pmd_csp(pmdp);
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	return pte;
 }
 
 int arch_prepare_hugepage(struct page *page)
@@ -58,7 +144,7 @@ void arch_release_hugepage(struct page *page)
 	ptep = (pte_t *) page[1].index;
 	if (!ptep)
 		return;
-	clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) ptep, _PAGE_INVALID,
 		    PTRS_PER_PTE * sizeof(pte_t));
 	page_table_free(&init_mm, (unsigned long *) ptep);
 	page[1].index = 0;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 80adfbf75065..990397420e6b 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 		pte = pte_offset_kernel(pmd, address);
 		if (!enable) {
 			__ptep_ipte(address, pte);
-			pte_val(*pte) = _PAGE_TYPE_EMPTY;
+			pte_val(*pte) = _PAGE_INVALID;
 			continue;
 		}
 		pte_val(*pte) = __pa(address);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index a8154a1a2c94..b9d35d63934e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
 	struct gmap_rmap *rmap;
 	struct page *page;
 
-	if (*table & _SEGMENT_ENTRY_INV)
+	if (*table & _SEGMENT_ENTRY_INVALID)
 		return 0;
 	page = pfn_to_page(*table >> PAGE_SHIFT);
 	mp = (struct gmap_pgtable *) page->index;
@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
 		kfree(rmap);
 		break;
 	}
-	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
 	return 1;
 }
 
@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap,
 		return -ENOMEM;
 	new = (unsigned long *) page_to_phys(page);
 	crst_table_init(new, init);
-	if (*table & _REGION_ENTRY_INV) {
+	if (*table & _REGION_ENTRY_INVALID) {
 		list_add(&page->lru, &gmap->crst_list);
 		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
 			(*table & _REGION_ENTRY_TYPE_MASK);
@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the guest addr space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
-		if (*table & _REGION_ENTRY_INV)
+		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 42) & 0x7ff);
-		if (*table & _REGION_ENTRY_INV)
+		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 31) & 0x7ff);
-		if (*table & _REGION_ENTRY_INV)
+		if (*table & _REGION_ENTRY_INVALID)
 			goto out;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 20) & 0x7ff);
 
 		/* Clear segment table entry in guest address space. */
 		flush |= gmap_unlink_segment(gmap, table);
-		*table = _SEGMENT_ENTRY_INV;
+		*table = _SEGMENT_ENTRY_INVALID;
 	}
 out:
 	spin_unlock(&gmap->mm->page_table_lock);
@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 	for (off = 0; off < len; off += PMD_SIZE) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + (((to + off) >> 53) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INV) &&
+		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
 			goto out_unmap;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 42) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INV) &&
+		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
 			goto out_unmap;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + (((to + off) >> 31) & 0x7ff);
-		if ((*table & _REGION_ENTRY_INV) &&
+		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
 			goto out_unmap;
 		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
 
 		/* Store 'from' address in an invalid segment table entry. */
 		flush |= gmap_unlink_segment(gmap, table);
-		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
+		*table = (from + off) | (_SEGMENT_ENTRY_INVALID |
+					 _SEGMENT_ENTRY_PROTECT);
 	}
 	spin_unlock(&gmap->mm->page_table_lock);
 	up_read(&gmap->mm->mmap_sem);
@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
 	unsigned long *table;
 
 	table = gmap->table + ((address >> 53) & 0x7ff);
-	if (unlikely(*table & _REGION_ENTRY_INV))
+	if (unlikely(*table & _REGION_ENTRY_INVALID))
 		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 42) & 0x7ff);
-	if (unlikely(*table & _REGION_ENTRY_INV))
+	if (unlikely(*table & _REGION_ENTRY_INVALID))
 		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 31) & 0x7ff);
-	if (unlikely(*table & _REGION_ENTRY_INV))
+	if (unlikely(*table & _REGION_ENTRY_INVALID))
 		return ERR_PTR(-EFAULT);
 	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	table = table + ((address >> 20) & 0x7ff);
@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
 		return PTR_ERR(segment_ptr);
 	/* Convert the gmap address to an mm address. */
 	segment = *segment_ptr;
-	if (!(segment & _SEGMENT_ENTRY_INV)) {
+	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
 		page = pfn_to_page(segment >> PAGE_SHIFT);
 		mp = (struct gmap_pgtable *) page->index;
 		return mp->vmaddr | (address & ~PMD_MASK);
-	} else if (segment & _SEGMENT_ENTRY_RO) {
+	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
 		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
 		return vmaddr | (address & ~PMD_MASK);
 	}
@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
 	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 	mp = (struct gmap_pgtable *) page->index;
 	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
-		*rmap->entry =
-			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
+					     _SEGMENT_ENTRY_PROTECT);
 		list_del(&rmap->list);
 		kfree(rmap);
 		flush = 1;
@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
 	/* Convert the gmap address to an mm address. */
 	while (1) {
 		segment = *segment_ptr;
-		if (!(segment & _SEGMENT_ENTRY_INV)) {
+		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
 			/* Page table is present */
 			page = pfn_to_page(segment >> PAGE_SHIFT);
 			mp = (struct gmap_pgtable *) page->index;
 			return mp->vmaddr | (address & ~PMD_MASK);
 		}
-		if (!(segment & _SEGMENT_ENTRY_RO))
+		if (!(segment & _SEGMENT_ENTRY_PROTECT))
 			/* Nothing mapped in the gmap address space. */
 			break;
 		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 	while (address < to) {
 		/* Walk the gmap address space page table */
 		table = gmap->table + ((address >> 53) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV)) {
+		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + ((address >> 42) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV)) {
+		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + ((address >> 31) & 0x7ff);
-		if (unlikely(*table & _REGION_ENTRY_INV)) {
+		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		table = table + ((address >> 20) & 0x7ff);
-		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
+		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
 			address = (address + PMD_SIZE) & PMD_MASK;
 			continue;
 		}
@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
 			continue;
 		/* Set notification bit in the pgste of the pte */
 		entry = *ptep;
-		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
+		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
 			pgste = pgste_get_lock(ptep);
 			pgste_val(pgste) |= PGSTE_IN_BIT;
 			pgste_set_unlock(ptep, pgste);
@@ -752,7 +753,7 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 	page->index = (unsigned long) mp;
 	atomic_set(&page->_mapcount, 3);
 	table = (unsigned long *) page_to_phys(page);
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
 	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
 	return table;
 }
@@ -878,7 +879,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
 		pgtable_page_ctor(page);
 		atomic_set(&page->_mapcount, 1);
 		table = (unsigned long *) page_to_phys(page);
-		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
 		spin_lock_bh(&mm->context.list_lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
 	} else {
@@ -1198,9 +1199,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 		list_del(lh);
 	}
 	ptep = (pte_t *) pgtable;
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 	ptep++;
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 	return pgtable;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8b268fcc4612..e1299d40818d 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
 	pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
 	if (!pte)
 		return NULL;
-	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+	clear_table((unsigned long *) pte, _PAGE_INVALID,
 		    PTRS_PER_PTE * sizeof(pte_t));
 	return pte;
 }
@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
 			pud_val(*pu_dir) = __pa(address) |
 				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
-				(ro ? _REGION_ENTRY_RO : 0);
+				(ro ? _REGION_ENTRY_PROTECT : 0);
 			address += PUD_SIZE;
 			continue;
 		}
@@ -118,7 +118,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
 			pmd_val(*pm_dir) = __pa(address) |
 				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
-				(ro ? _SEGMENT_ENTRY_RO : 0);
+				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
 			address += PMD_SIZE;
 			continue;
 		}
@@ -131,7 +131,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
-		pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
+		pte_val(*pt_dir) = __pa(address) |
+			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
 		address += PAGE_SIZE;
 	}
 	ret = 0;
@@ -154,7 +155,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 	pte_t *pt_dir;
 	pte_t  pte;
 
-	pte_val(pte) = _PAGE_TYPE_EMPTY;
+	pte_val(pte) = _PAGE_INVALID;
 	while (address < end) {
 		pg_dir = pgd_offset_k(address);
 		if (pgd_none(*pg_dir)) {
@@ -255,7 +256,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 			new_page =__pa(vmem_alloc_pages(0));
 			if (!new_page)
 				goto out;
-			pte_val(*pt_dir) = __pa(new_page);
+			pte_val(*pt_dir) =
+				__pa(new_page) | pgprot_val(PAGE_KERNEL);
 		}
 		address += PAGE_SIZE;
 	}