author		Heiko Carstens <heiko.carstens@de.ibm.com>	2017-07-05 01:37:27 -0400
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2017-07-26 02:25:09 -0400
commit		f1c1174fa099566f02c809193e9720593b231ae2 (patch)
tree		5868d7c9608b5429f59e2b237b7033604ad1fb5a
parent		c67da7c7c5d4c0a45b079b21f6991cb7e753856e (diff)
s390/mm: use new mm defines instead of magic values
Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
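Notes: the defines referenced by this patch replace the magic values one-for-one.
The authoritative definitions live in arch/s390/include/asm/pgtable.h; the sketch
below is inferred from the substitutions visible in this diff and is illustrative
only, not a verbatim copy of the header:

	/* table hierarchy shifts, matching the old literal shift amounts */
	#define _PAGE_SHIFT	12	/* 4K pages */
	#define _SEGMENT_SHIFT	20	/* 1M segments */
	#define _REGION3_SHIFT	31	/* 2G region-3 entries */
	#define _REGION2_SHIFT	42	/* 4T region-2 entries */
	#define _REGION1_SHIFT	53	/* 8P region-1 entries */

	/* sizes replace the 1UL << {12,20,31,42,53} literals */
	#define _PAGE_SIZE	(1UL << _PAGE_SHIFT)
	#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)
	#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
	#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
	#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)

	/* masks replace the hex literals, e.g. _REGION1_MASK == 0xffe0000000000000UL */
	#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))
	#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
	#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
	#define _REGION1_MASK	(~(_REGION1_SIZE - 1))

	/* index extraction replaces (addr >> SHIFT) & {0x7ff,0xff} */
	#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)
	#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
	#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
	#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
	#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)

	/* table geometry replaces 2048, 256, sizeof(unsigned long)*2048, order 2 */
	#define _CRST_ENTRIES	2048	/* region/segment table entries */
	#define _PAGE_ENTRIES	256	/* page table entries */
	#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)	/* 16K */
	#define CRST_ALLOC_ORDER 2	/* 16K = 4 pages */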
-rw-r--r--	arch/s390/include/asm/elf.h		2
-rw-r--r--	arch/s390/include/asm/ipl.h		2
-rw-r--r--	arch/s390/include/asm/mmu_context.h	6
-rw-r--r--	arch/s390/include/asm/pgalloc.h		12
-rw-r--r--	arch/s390/include/asm/qdio.h		2
-rw-r--r--	arch/s390/include/asm/tlb.h		6
-rw-r--r--	arch/s390/kernel/dumpstack.c		2
-rw-r--r--	arch/s390/kernel/relocate_kernel.S	5
-rw-r--r--	arch/s390/kernel/setup.c		8
-rw-r--r--	arch/s390/kernel/vdso32/vdso32.lds.S	4
-rw-r--r--	arch/s390/kernel/vdso64/vdso64.lds.S	4
-rw-r--r--	arch/s390/mm/fault.c			10
-rw-r--r--	arch/s390/mm/gmap.c			124
-rw-r--r--	arch/s390/mm/init.c			5
-rw-r--r--	arch/s390/mm/pgalloc.c			10
15 files changed, 103 insertions(+), 99 deletions(-)
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c92ed0170be2..65998a1f5d43 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -191,7 +191,7 @@ struct arch_elf_state {
 } while (0)
 
 #define CORE_DUMP_USE_REGSET
-#define ELF_EXEC_PAGESIZE	4096
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
 
 /*
  * This is the base location for PIE (ET_DYN with INTERP) loads. On
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index edb5161df7e2..6810bd757312 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -81,7 +81,7 @@ struct ipl_parameter_block {
 		struct ipl_block_fcp fcp;
 		struct ipl_block_ccw ccw;
 	} ipl_info;
-} __attribute__((packed,aligned(4096)));
+} __packed __aligned(PAGE_SIZE);
 
 /*
  * IPL validity flags
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 4541ac44b35f..92c1eb79ada4 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -33,7 +33,7 @@ static inline int init_new_context(struct task_struct *tsk,
 	mm->context.use_cmma = 0;
 #endif
 	switch (mm->context.asce_limit) {
-	case 1UL << 42:
+	case _REGION2_SIZE:
 		/*
 		 * forked 3-level task, fall through to set new asce with new
 		 * mm->pgd
@@ -44,12 +44,12 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 				   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
 		break;
-	case 1UL << 53:
+	case _REGION1_SIZE:
 		/* forked 4-level task, set new asce with new mm->pgd */
 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
 		break;
-	case 1UL << 31:
+	case _REGION3_SIZE:
 		/* forked 2-level compat task, set new asce with new mm->pgd */
 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index eccfa0642712..ead67a34781f 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -44,16 +44,16 @@ static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 
 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 {
-	clear_table(crst, entry, sizeof(unsigned long)*2048);
+	clear_table(crst, entry, _CRST_TABLE_SIZE);
 }
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-	if (mm->context.asce_limit <= (1UL << 31))
+	if (mm->context.asce_limit <= _REGION3_SIZE)
 		return _SEGMENT_ENTRY_EMPTY;
-	if (mm->context.asce_limit <= (1UL << 42))
+	if (mm->context.asce_limit <= _REGION2_SIZE)
 		return _REGION3_ENTRY_EMPTY;
-	if (mm->context.asce_limit <= (1UL << 53))
+	if (mm->context.asce_limit <= _REGION1_SIZE)
 		return _REGION2_ENTRY_EMPTY;
 	return _REGION1_ENTRY_EMPTY;
 }
@@ -121,7 +121,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 	if (!table)
 		return NULL;
-	if (mm->context.asce_limit == (1UL << 31)) {
+	if (mm->context.asce_limit == _REGION3_SIZE) {
 		/* Forking a compat process with 2 page table levels */
 		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
 			crst_table_free(mm, table);
@@ -133,7 +133,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
133 133
134static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) 134static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
135{ 135{
136 if (mm->context.asce_limit == (1UL << 31)) 136 if (mm->context.asce_limit == _REGION3_SIZE)
137 pgtable_pmd_page_dtor(virt_to_page(pgd)); 137 pgtable_pmd_page_dtor(virt_to_page(pgd));
138 crst_table_free(mm, (unsigned long *) pgd); 138 crst_table_free(mm, (unsigned long *) pgd);
139} 139}
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 998b61cd0e56..eaee69e7c42a 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -80,7 +80,7 @@ struct qdr {
 	u32 qkey	: 4;
 	u32		: 28;
 	struct qdesfmt0 qdf0[126];
-} __attribute__ ((packed, aligned(4096)));
+} __packed __aligned(PAGE_SIZE);
 
 #define QIB_AC_OUTBOUND_PCI_SUPPORTED	0x40
 #define QIB_RFLAGS_ENABLE_QEBSM		0x80
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 7317b3108a88..950af48e88be 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -130,7 +130,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 				unsigned long address)
 {
-	if (tlb->mm->context.asce_limit <= (1UL << 31))
+	if (tlb->mm->context.asce_limit <= _REGION3_SIZE)
 		return;
 	pgtable_pmd_page_dtor(virt_to_page(pmd));
 	tlb_remove_table(tlb, pmd);
@@ -146,7 +146,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 				unsigned long address)
 {
-	if (tlb->mm->context.asce_limit <= (1UL << 53))
+	if (tlb->mm->context.asce_limit <= _REGION1_SIZE)
 		return;
 	tlb_remove_table(tlb, p4d);
 }
@@ -161,7 +161,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 				unsigned long address)
 {
-	if (tlb->mm->context.asce_limit <= (1UL << 42))
+	if (tlb->mm->context.asce_limit <= _REGION2_SIZE)
 		return;
 	tlb_remove_table(tlb, pud);
 }
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index dab78babfab6..2aa545dca4d5 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -76,7 +76,7 @@ void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
 	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 #ifdef CONFIG_CHECK_STACK
 	sp = __dump_trace(func, data, sp,
-			  S390_lowcore.panic_stack + frame_size - 4096,
+			  S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
 			  S390_lowcore.panic_stack + frame_size);
 #endif
 	sp = __dump_trace(func, data, sp,
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index cfac28330b03..4bdc65636603 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -7,6 +7,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/page.h>
 #include <asm/sigp.h>
 
 /*
@@ -55,8 +56,8 @@ ENTRY(relocate_kernel)
 	.back_pgm:
 		lmg	%r0,%r15,gprregs-.base(%r13)
 	.top:
-		lghi	%r7,4096	# load PAGE_SIZE in r7
-		lghi	%r9,4096	# load PAGE_SIZE in r9
+		lghi	%r7,PAGE_SIZE	# load PAGE_SIZE in r7
+		lghi	%r9,PAGE_SIZE	# load PAGE_SIZE in r9
 		lg	%r5,0(%r2)	# read another word for indirection page
 		aghi	%r2,8		# increment pointer
 		tml	%r5,0x1		# is it a destination page?
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 3d1d808ea8a9..bc1c95b7a4bd 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -305,7 +305,7 @@ static void __init setup_lowcore(void)
 	/*
 	 * Setup lowcore for boot cpu
 	 */
-	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
+	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
 	lc = memblock_virt_alloc_low(sizeof(*lc), sizeof(*lc));
 	lc->restart_psw.mask = PSW_KERNEL_BITS;
 	lc->restart_psw.addr = (unsigned long) restart_int_handler;
@@ -469,10 +469,10 @@ static void __init setup_memory_end(void)
 	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
 	tmp = (memory_end ?: max_physmem_end) / PAGE_SIZE;
 	tmp = tmp * (sizeof(struct page) + PAGE_SIZE);
-	if (tmp + vmalloc_size + MODULES_LEN <= (1UL << 42))
-		vmax = 1UL << 42;	/* 3-level kernel page table */
+	if (tmp + vmalloc_size + MODULES_LEN <= _REGION2_SIZE)
+		vmax = _REGION2_SIZE;	/* 3-level kernel page table */
 	else
-		vmax = 1UL << 53;	/* 4-level kernel page table */
+		vmax = _REGION1_SIZE;	/* 4-level kernel page table */
 	/* module area is at the end of the kernel address space. */
 	MODULES_END = vmax;
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S
index 8f048c2d6d13..263a7f9eee1e 100644
--- a/arch/s390/kernel/vdso32/vdso32.lds.S
+++ b/arch/s390/kernel/vdso32/vdso32.lds.S
@@ -2,6 +2,8 @@
  * This is the infamous ld script for the 32 bits vdso
  * library
  */
+
+#include <asm/page.h>
 #include <asm/vdso.h>
 
 OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
@@ -91,7 +93,7 @@ SECTIONS
 	.debug_ranges	0 : { *(.debug_ranges) }
 	.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
 
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	PROVIDE(_vdso_data = .);
 
 	/DISCARD/	: {
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
index f35455d497fe..9e3dbbcc1cfc 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -2,6 +2,8 @@
  * This is the infamous ld script for the 64 bits vdso
  * library
  */
+
+#include <asm/page.h>
 #include <asm/vdso.h>
 
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
@@ -91,7 +93,7 @@ SECTIONS
 	.debug_ranges	0 : { *(.debug_ranges) }
 	.gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
 
-	. = ALIGN(4096);
+	. = ALIGN(PAGE_SIZE);
 	PROVIDE(_vdso_data = .);
 
 	/DISCARD/	: {
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 14f25798b001..bdabb013537b 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -135,7 +135,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 	pr_alert("AS:%016lx ", asce);
 	switch (asce & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
-		table = table + ((address >> 53) & 0x7ff);
+		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
 		if (bad_address(table))
 			goto bad;
 		pr_cont("R1:%016lx ", *table);
@@ -144,7 +144,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_REGION2:
-		table = table + ((address >> 42) & 0x7ff);
+		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
 		if (bad_address(table))
 			goto bad;
 		pr_cont("R2:%016lx ", *table);
@@ -153,7 +153,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_REGION3:
-		table = table + ((address >> 31) & 0x7ff);
+		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
 		if (bad_address(table))
 			goto bad;
 		pr_cont("R3:%016lx ", *table);
@@ -162,7 +162,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* fallthrough */
 	case _ASCE_TYPE_SEGMENT:
-		table = table + ((address >> 20) & 0x7ff);
+		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
 		if (bad_address(table))
 			goto bad;
 		pr_cont("S:%016lx ", *table);
@@ -170,7 +170,7 @@ static void dump_pagetable(unsigned long asce, unsigned long address)
 			goto out;
 		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
 	}
-	table = table + ((address >> 12) & 0xff);
+	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
 	if (bad_address(table))
 		goto bad;
 	pr_cont("P:%016lx ", *table);
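Note on the index rewrite in fault.c above: the two forms are arithmetically
identical, since masking the index bits in place and then shifting down selects
the same table slot as shifting first and masking. A minimal standalone check,
assuming the define values inferred in the notes above (not taken verbatim from
pgtable.h):

	#include <assert.h>

	#define _REGION1_SHIFT	53
	#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)

	int main(void)
	{
		unsigned long address = 0x123456789abcdef0UL;
		/* old form: shift first, then mask the 11-bit table index */
		unsigned long old = (address >> 53) & 0x7ff;
		/* new form: mask the index bits in place, then shift down */
		unsigned long new = (address & _REGION1_INDEX) >> _REGION1_SHIFT;

		assert(old == new);	/* same slot out of 2048 entries */
		return 0;
	}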
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 4fb3d3cdb370..53292c03e312 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -36,16 +36,16 @@ static struct gmap *gmap_alloc(unsigned long limit)
 	unsigned long *table;
 	unsigned long etype, atype;
 
-	if (limit < (1UL << 31)) {
-		limit = (1UL << 31) - 1;
+	if (limit < _REGION3_SIZE) {
+		limit = _REGION3_SIZE - 1;
 		atype = _ASCE_TYPE_SEGMENT;
 		etype = _SEGMENT_ENTRY_EMPTY;
-	} else if (limit < (1UL << 42)) {
-		limit = (1UL << 42) - 1;
+	} else if (limit < _REGION2_SIZE) {
+		limit = _REGION2_SIZE - 1;
 		atype = _ASCE_TYPE_REGION3;
 		etype = _REGION3_ENTRY_EMPTY;
-	} else if (limit < (1UL << 53)) {
-		limit = (1UL << 53) - 1;
+	} else if (limit < _REGION1_SIZE) {
+		limit = _REGION1_SIZE - 1;
 		atype = _ASCE_TYPE_REGION2;
 		etype = _REGION2_ENTRY_EMPTY;
 	} else {
@@ -65,7 +65,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
 	spin_lock_init(&gmap->guest_table_lock);
 	spin_lock_init(&gmap->shadow_lock);
 	atomic_set(&gmap->ref_count, 1);
-	page = alloc_pages(GFP_KERNEL, 2);
+	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
 	if (!page)
 		goto out_free;
 	page->index = 0;
@@ -186,7 +186,7 @@ static void gmap_free(struct gmap *gmap)
 	gmap_flush_tlb(gmap);
 	/* Free all segment & region tables. */
 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
-		__free_pages(page, 2);
+		__free_pages(page, CRST_ALLOC_ORDER);
 	gmap_radix_tree_free(&gmap->guest_to_host);
 	gmap_radix_tree_free(&gmap->host_to_guest);
 
@@ -306,7 +306,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 	unsigned long *new;
 
 	/* since we dont free the gmap table until gmap_free we can unlock */
-	page = alloc_pages(GFP_KERNEL, 2);
+	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
 	if (!page)
 		return -ENOMEM;
 	new = (unsigned long *) page_to_phys(page);
@@ -321,7 +321,7 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
 	}
 	spin_unlock(&gmap->guest_table_lock);
 	if (page)
-		__free_pages(page, 2);
+		__free_pages(page, CRST_ALLOC_ORDER);
 	return 0;
 }
 
@@ -546,30 +546,30 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	/* Create higher level tables in the gmap page table */
 	table = gmap->table;
 	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
-		table += (gaddr >> 53) & 0x7ff;
+		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
 		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
-				     gaddr & 0xffe0000000000000UL))
+				     gaddr & _REGION1_MASK))
 			return -ENOMEM;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	}
 	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
-		table += (gaddr >> 42) & 0x7ff;
+		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
 		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
-				     gaddr & 0xfffffc0000000000UL))
+				     gaddr & _REGION2_MASK))
 			return -ENOMEM;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	}
 	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
-		table += (gaddr >> 31) & 0x7ff;
+		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
 		if ((*table & _REGION_ENTRY_INVALID) &&
 		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
-				     gaddr & 0xffffffff80000000UL))
+				     gaddr & _REGION3_MASK))
 			return -ENOMEM;
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 	}
-	table += (gaddr >> 20) & 0x7ff;
+	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
 	/* Walk the parent mm page table */
 	mm = gmap->mm;
 	pgd = pgd_offset(mm, vmaddr);
@@ -771,7 +771,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
 	table = gmap->table;
 	switch (gmap->asce & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
-		table += (gaddr >> 53) & 0x7ff;
+		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
 		if (level == 4)
 			break;
 		if (*table & _REGION_ENTRY_INVALID)
@@ -779,7 +779,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* Fallthrough */
 	case _ASCE_TYPE_REGION2:
-		table += (gaddr >> 42) & 0x7ff;
+		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
 		if (level == 3)
 			break;
 		if (*table & _REGION_ENTRY_INVALID)
@@ -787,7 +787,7 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* Fallthrough */
 	case _ASCE_TYPE_REGION3:
-		table += (gaddr >> 31) & 0x7ff;
+		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
 		if (level == 2)
 			break;
 		if (*table & _REGION_ENTRY_INVALID)
@@ -795,13 +795,13 @@ static inline unsigned long *gmap_table_walk(struct gmap *gmap,
 		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 		/* Fallthrough */
 	case _ASCE_TYPE_SEGMENT:
-		table += (gaddr >> 20) & 0x7ff;
+		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
 		if (level == 1)
 			break;
 		if (*table & _REGION_ENTRY_INVALID)
 			return NULL;
 		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
-		table += (gaddr >> 12) & 0xff;
+		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
 	}
 	return table;
 }
@@ -1126,7 +1126,7 @@ static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
 	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
 	if (!table || *table & _PAGE_INVALID)
 		return;
-	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
+	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
 	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
 }
 
@@ -1144,7 +1144,7 @@ static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
 	int i;
 
 	BUG_ON(!gmap_is_shadow(sg));
-	for (i = 0; i < 256; i++, raddr += 1UL << 12)
+	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
 		pgt[i] = _PAGE_INVALID;
 }
 
@@ -1164,8 +1164,8 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
 	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
 	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
 		return;
-	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
-	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
+	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
+	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
 	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
 	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
 	*ste = _SEGMENT_ENTRY_EMPTY;
@@ -1193,7 +1193,7 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
 
 	BUG_ON(!gmap_is_shadow(sg));
 	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
-	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
+	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
 		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
 			continue;
 		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
@@ -1222,8 +1222,8 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
 	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
 	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
 		return;
-	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
-	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
+	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
+	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
 	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
 	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
 	*r3e = _REGION3_ENTRY_EMPTY;
@@ -1231,7 +1231,7 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
 	/* Free segment table */
 	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
 	list_del(&page->lru);
-	__free_pages(page, 2);
+	__free_pages(page, CRST_ALLOC_ORDER);
 }
 
 /**
@@ -1251,7 +1251,7 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
 
 	BUG_ON(!gmap_is_shadow(sg));
 	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
-	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
+	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
 		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
 			continue;
 		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
@@ -1260,7 +1260,7 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
 		/* Free segment table */
 		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
 		list_del(&page->lru);
-		__free_pages(page, 2);
+		__free_pages(page, CRST_ALLOC_ORDER);
 	}
 }
 
@@ -1280,8 +1280,8 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
 	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
 	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
 		return;
-	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
-	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
+	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
+	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
 	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
 	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
 	*r2e = _REGION2_ENTRY_EMPTY;
@@ -1289,7 +1289,7 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
 	/* Free region 3 table */
 	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
 	list_del(&page->lru);
-	__free_pages(page, 2);
+	__free_pages(page, CRST_ALLOC_ORDER);
 }
 
 /**
@@ -1309,7 +1309,7 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
 
 	BUG_ON(!gmap_is_shadow(sg));
 	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
-	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
+	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
 		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
 			continue;
 		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
@@ -1318,7 +1318,7 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
 		/* Free region 3 table */
 		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
 		list_del(&page->lru);
-		__free_pages(page, 2);
+		__free_pages(page, CRST_ALLOC_ORDER);
 	}
 }
 
@@ -1338,8 +1338,8 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
 	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
 	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
 		return;
-	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
-	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
+	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
+	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
 	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
 	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
 	*r1e = _REGION1_ENTRY_EMPTY;
@@ -1347,7 +1347,7 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
 	/* Free region 2 table */
 	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
 	list_del(&page->lru);
-	__free_pages(page, 2);
+	__free_pages(page, CRST_ALLOC_ORDER);
 }
 
 /**
@@ -1367,7 +1367,7 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
 
 	BUG_ON(!gmap_is_shadow(sg));
 	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
-	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
+	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
 		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
 			continue;
 		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
@@ -1378,7 +1378,7 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
 		/* Free region 2 table */
 		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
 		list_del(&page->lru);
-		__free_pages(page, 2);
+		__free_pages(page, CRST_ALLOC_ORDER);
 	}
 }
 
@@ -1535,7 +1535,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
 	/* protect after insertion, so it will get properly invalidated */
 	down_read(&parent->mm->mmap_sem);
 	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
-				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
+				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
 				PROT_READ, PGSTE_VSIE_BIT);
 	up_read(&parent->mm->mmap_sem);
 	spin_lock(&parent->shadow_lock);
@@ -1578,7 +1578,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
 
 	BUG_ON(!gmap_is_shadow(sg));
 	/* Allocate a shadow region second table */
-	page = alloc_pages(GFP_KERNEL, 2);
+	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
 	if (!page)
 		return -ENOMEM;
 	page->index = r2t & _REGION_ENTRY_ORIGIN;
@@ -1614,10 +1614,10 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
 	}
 	spin_unlock(&sg->guest_table_lock);
 	/* Make r2t read-only in parent gmap page table */
-	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
+	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
 	origin = r2t & _REGION_ENTRY_ORIGIN;
-	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
-	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
+	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
+	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
 	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
@@ -1634,7 +1634,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
 	return rc;
 out_free:
 	spin_unlock(&sg->guest_table_lock);
-	__free_pages(page, 2);
+	__free_pages(page, CRST_ALLOC_ORDER);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
@@ -1662,7 +1662,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 
 	BUG_ON(!gmap_is_shadow(sg));
 	/* Allocate a shadow region second table */
-	page = alloc_pages(GFP_KERNEL, 2);
+	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
 	if (!page)
 		return -ENOMEM;
 	page->index = r3t & _REGION_ENTRY_ORIGIN;
@@ -1697,10 +1697,10 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 	}
 	spin_unlock(&sg->guest_table_lock);
 	/* Make r3t read-only in parent gmap page table */
-	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
+	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
 	origin = r3t & _REGION_ENTRY_ORIGIN;
-	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
-	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
+	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
+	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
 	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
@@ -1717,7 +1717,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 	return rc;
 out_free:
 	spin_unlock(&sg->guest_table_lock);
-	__free_pages(page, 2);
+	__free_pages(page, CRST_ALLOC_ORDER);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
@@ -1745,7 +1745,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 
 	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
 	/* Allocate a shadow segment table */
-	page = alloc_pages(GFP_KERNEL, 2);
+	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
 	if (!page)
 		return -ENOMEM;
 	page->index = sgt & _REGION_ENTRY_ORIGIN;
@@ -1781,10 +1781,10 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 	}
 	spin_unlock(&sg->guest_table_lock);
 	/* Make sgt read-only in parent gmap page table */
-	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
+	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
 	origin = sgt & _REGION_ENTRY_ORIGIN;
-	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
-	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
+	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
+	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
 	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
 	spin_lock(&sg->guest_table_lock);
 	if (!rc) {
@@ -1801,7 +1801,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 	return rc;
 out_free:
 	spin_unlock(&sg->guest_table_lock);
-	__free_pages(page, 2);
+	__free_pages(page, CRST_ALLOC_ORDER);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
@@ -1902,7 +1902,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
 	}
 	spin_unlock(&sg->guest_table_lock);
 	/* Make pgt read-only in parent gmap page table (not the pgste) */
-	raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
+	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
 	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
 	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
 	spin_lock(&sg->guest_table_lock);
@@ -2021,7 +2021,7 @@ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
 	}
 	/* Check for top level table */
 	start = sg->orig_asce & _ASCE_ORIGIN;
-	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
+	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
 	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
 	    gaddr < end) {
 		/* The complete shadow table has to go */
@@ -2032,7 +2032,7 @@ static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
 		return;
 	}
 	/* Remove the page table tree from on specific entry */
-	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
+	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
 	gmap_for_each_rmap_safe(rmap, rnext, head) {
 		bits = rmap->raddr & _SHADOW_RMAP_MASK;
 		raddr = rmap->raddr ^ bits;
@@ -2076,7 +2076,7 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
 	struct gmap *gmap, *sg, *next;
 
 	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
-	offset = offset * (4096 / sizeof(pte_t));
+	offset = offset * (PAGE_SIZE / sizeof(pte_t));
 	rcu_read_lock();
 	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		spin_lock(&gmap->guest_table_lock);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 3aee54b2ba60..c52a6b834f08 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -84,7 +84,7 @@ void __init paging_init(void)
 	psw_t psw;
 
 	init_mm.pgd = swapper_pg_dir;
-	if (VMALLOC_END > (1UL << 42)) {
+	if (VMALLOC_END > _REGION2_SIZE) {
 		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION2_ENTRY_EMPTY;
 	} else {
@@ -93,8 +93,7 @@ void __init paging_init(void)
 	}
 	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
 	S390_lowcore.kernel_asce = init_mm.context.asce;
-	clear_table((unsigned long *) init_mm.pgd, pgd_type,
-		    sizeof(unsigned long)*2048);
+	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
 	vmem_map_init();
 
 	/* enable virtual mapping in kernel mode */
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index a4de34ce392c..c5b74dd61197 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -83,7 +83,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 	int rc, notify;
 
 	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
-	BUG_ON(mm->context.asce_limit < (1UL << 42));
+	BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
 	if (end >= TASK_SIZE_MAX)
 		return -ENOMEM;
 	rc = 0;
@@ -96,11 +96,11 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
 	}
 	spin_lock_bh(&mm->page_table_lock);
 	pgd = (unsigned long *) mm->pgd;
-	if (mm->context.asce_limit == (1UL << 42)) {
+	if (mm->context.asce_limit == _REGION2_SIZE) {
 		crst_table_init(table, _REGION2_ENTRY_EMPTY);
 		p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
 		mm->pgd = (pgd_t *) table;
-		mm->context.asce_limit = 1UL << 53;
+		mm->context.asce_limit = _REGION1_SIZE;
 		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
 	} else {
@@ -124,7 +124,7 @@ void crst_table_downgrade(struct mm_struct *mm)
 	pgd_t *pgd;
 
 	/* downgrade should only happen from 3 to 2 levels (compat only) */
-	BUG_ON(mm->context.asce_limit != (1UL << 42));
+	BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
 
 	if (current->active_mm == mm) {
 		clear_user_asce();
@@ -133,7 +133,7 @@ void crst_table_downgrade(struct mm_struct *mm)
 
 	pgd = mm->pgd;
 	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
-	mm->context.asce_limit = 1UL << 31;
+	mm->context.asce_limit = _REGION3_SIZE;
 	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
 	crst_table_free(mm, (unsigned long *) pgd);