author    Heiko Carstens <heiko.carstens@de.ibm.com>  2015-02-12 07:08:27 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com> 2015-03-25 06:49:33 -0400
commit    5a79859ae0f35d25c67a03e82bf0c80592f16a39 (patch)
tree      37264d49f069812f19ced94e6ae171814fb7e498 /arch/s390/mm
parent    1833c9f647e9bda1cd24653ff8f9c207b5f5b911 (diff)
s390: remove 31 bit support
Remove the 31 bit support in order to reduce maintenance cost and effectively remove dead code. For a couple of years now there has been no distribution left that comes with a 31 bit kernel, and the 31 bit kernel had been broken for more than a year before anybody noticed. In addition, I added a removal warning to the kernel, shown at ipl for 5 minutes, with a960062e5826 ("s390: add 31 bit warning message"), which let everybody know about the plan to remove the 31 bit code. We didn't get any response. Given that the last 31 bit only machine was introduced in 1999, let's remove the code. Anybody with 31 bit user space code can still use the compat mode.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/dump_pagetables.c  24
-rw-r--r--  arch/s390/mm/extmem.c           14
-rw-r--r--  arch/s390/mm/fault.c            36
-rw-r--r--  arch/s390/mm/gup.c               4
-rw-r--r--  arch/s390/mm/init.c              5
-rw-r--r--  arch/s390/mm/mem_detect.c        4
-rw-r--r--  arch/s390/mm/mmap.c             25
-rw-r--r--  arch/s390/mm/pageattr.c          2
-rw-r--r--  arch/s390/mm/pgtable.c           8
-rw-r--r--  arch/s390/mm/vmem.c             10
10 files changed, 5 insertions(+), 127 deletions(-)
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index d46cadeda204..8556d6be9b54 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,9 +18,7 @@ enum address_markers_idx {
 	KERNEL_END_NR,
 	VMEMMAP_NR,
 	VMALLOC_NR,
-#ifdef CONFIG_64BIT
 	MODULES_NR,
-#endif
 };
 
 static struct addr_marker address_markers[] = {
@@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = {
 	[KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
 	[VMEMMAP_NR] = {0, "vmemmap Area"},
 	[VMALLOC_NR] = {0, "vmalloc Area"},
-#ifdef CONFIG_64BIT
 	[MODULES_NR] = {0, "Modules Area"},
-#endif
 	{ -1, NULL }
 };
 
@@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 	}
 }
 
-#ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
-#else
-#define _PMD_PROT_MASK 0
-#endif
-
 static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 			   pud_t *pud, unsigned long addr)
 {
@@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 	pmd = pmd_offset(pud, addr);
 	if (!pmd_none(*pmd)) {
 		if (pmd_large(*pmd)) {
-			prot = pmd_val(*pmd) & _PMD_PROT_MASK;
+			prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
 			note_page(m, st, prot, 3);
 		} else
 			walk_pte_level(m, st, pmd, addr);
@@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 	}
 }
 
-#ifdef CONFIG_64BIT
-#define _PUD_PROT_MASK _REGION3_ENTRY_RO
-#else
-#define _PUD_PROT_MASK 0
-#endif
-
 static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 			   pgd_t *pgd, unsigned long addr)
 {
@@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 	pud = pud_offset(pgd, addr);
 	if (!pud_none(*pud))
 		if (pud_large(*pud)) {
-			prot = pud_val(*pud) & _PUD_PROT_MASK;
+			prot = pud_val(*pud) & _REGION3_ENTRY_RO;
 			note_page(m, st, prot, 2);
 		} else
 			walk_pmd_level(m, st, pud, addr);
@@ -230,13 +214,9 @@ static int pt_dump_init(void)
 	 * kernel ASCE. We need this to keep the page table walker functions
 	 * from accessing non-existent entries.
 	 */
-#ifdef CONFIG_32BIT
-	max_addr = 1UL << 31;
-#else
 	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
 	max_addr = 1UL << (max_addr * 11 + 31);
 	address_markers[MODULES_NR].start_address = MODULES_VADDR;
-#endif
 	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
 	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
 	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 519bba716cc3..23c496957c22 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -51,7 +51,6 @@ struct qout64 {
 	struct qrange range[6];
 };
 
-#ifdef CONFIG_64BIT
 struct qrange_old {
 	unsigned int start; /* last byte type */
 	unsigned int end; /* last byte reserved */
@@ -65,7 +64,6 @@ struct qout64_old {
 	int segrcnt;
 	struct qrange_old range[6];
 };
-#endif
 
 struct qin64 {
 	char qopcode;
@@ -103,7 +101,6 @@ static int scode_set;
 static int
 dcss_set_subcodes(void)
 {
-#ifdef CONFIG_64BIT
 	char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
 	unsigned long rx, ry;
 	int rc;
@@ -135,7 +132,6 @@ dcss_set_subcodes(void)
 		segext_scode = DCSS_SEGEXTX;
 		return 0;
 	}
-#endif
 	/* Diag x'64' new subcodes are not supported, set to old subcodes */
 	loadshr_scode = DCSS_LOADNOLY;
 	loadnsr_scode = DCSS_LOADNSR;
@@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter,
 	rx = (unsigned long) parameter;
 	ry = (unsigned long) *func;
 
-#ifdef CONFIG_64BIT
 	/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
 	if (*func > DCSS_SEGEXT)
 		asm volatile(
@@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter,
 			"	ipm	%2\n"
 			"	srl	%2,28\n"
 			: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#else
-	asm volatile(
-		"	diag	%0,%1,0x64\n"
-		"	ipm	%2\n"
-		"	srl	%2,28\n"
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#endif
 	*ret1 = rx;
 	*ret2 = ry;
 	return rc;
@@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg)
 		goto out_free;
 	}
 
-#ifdef CONFIG_64BIT
 	/* Only old format of output area of Diagnose x'64' is supported,
 	   copy data for the new format. */
 	if (segext_scode == DCSS_SEGEXT) {
@@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg)
 		}
 		kfree(qout_old);
 	}
-#endif
 	if (qout->segcnt > 6) {
 		rc = -EOPNOTSUPP;
 		goto out_free;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3ff86533f7db..76515bcea2f1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -36,15 +36,9 @@
 #include <asm/facility.h>
 #include "../kernel/entry.h"
 
-#ifndef CONFIG_64BIT
-#define __FAIL_ADDR_MASK 0x7ffff000
-#define __SUBCODE_MASK 0x0200
-#define __PF_RES_FIELD 0ULL
-#else /* CONFIG_64BIT */
 #define __FAIL_ADDR_MASK -4096L
 #define __SUBCODE_MASK 0x0600
 #define __PF_RES_FIELD 0x8000000000000000ULL
-#endif /* CONFIG_64BIT */
 
 #define VM_FAULT_BADCONTEXT 0x010000
 #define VM_FAULT_BADMAP 0x020000
@@ -54,7 +48,6 @@
 
 static unsigned long store_indication __read_mostly;
 
-#ifdef CONFIG_64BIT
 static int __init fault_init(void)
 {
 	if (test_facility(75))
@@ -62,7 +55,6 @@ static int __init fault_init(void)
 	return 0;
 }
 early_initcall(fault_init);
-#endif
 
 static inline int notify_page_fault(struct pt_regs *regs)
 {
@@ -133,7 +125,6 @@ static int bad_address(void *p)
 	return probe_kernel_address((unsigned long *)p, dummy);
 }
 
-#ifdef CONFIG_64BIT
 static void dump_pagetable(unsigned long asce, unsigned long address)
 {
 	unsigned long *table = __va(asce & PAGE_MASK);
@@ -187,33 +178,6 @@ bad:
 	pr_cont("BAD\n");
 }
 
-#else /* CONFIG_64BIT */
-
-static void dump_pagetable(unsigned long asce, unsigned long address)
-{
-	unsigned long *table = __va(asce & PAGE_MASK);
-
-	pr_alert("AS:%08lx ", asce);
-	table = table + ((address >> 20) & 0x7ff);
-	if (bad_address(table))
-		goto bad;
-	pr_cont("S:%08lx ", *table);
-	if (*table & _SEGMENT_ENTRY_INVALID)
-		goto out;
-	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
-	table = table + ((address >> 12) & 0xff);
-	if (bad_address(table))
-		goto bad;
-	pr_cont("P:%08lx ", *table);
-out:
-	pr_cont("\n");
-	return;
-bad:
-	pr_cont("BAD\n");
-}
-
-#endif /* CONFIG_64BIT */
-
 static void dump_fault_info(struct pt_regs *regs)
 {
 	unsigned long asce;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5c586c78ca8d..1eb41bb3010c 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 	pmd_t *pmdp, pmd;
 
 	pmdp = (pmd_t *) pudp;
-#ifdef CONFIG_64BIT
 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
 		pmdp = (pmd_t *) pud_deref(pud);
 	pmdp += pmd_index(addr);
-#endif
 	do {
 		pmd = *pmdp;
 		barrier();
@@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 	pud_t *pudp, pud;
 
 	pudp = (pud_t *) pgdp;
-#ifdef CONFIG_64BIT
 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
 		pudp = (pud_t *) pgd_deref(pgd);
 	pudp += pud_index(addr);
-#endif
 	do {
 		pud = *pudp;
 		barrier();
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d35b15113b17..80875c43a4a4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -105,7 +105,6 @@ void __init paging_init(void)
 	unsigned long pgd_type, asce_bits;
 
 	init_mm.pgd = swapper_pg_dir;
-#ifdef CONFIG_64BIT
 	if (VMALLOC_END > (1UL << 42)) {
 		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION2_ENTRY_EMPTY;
@@ -113,10 +112,6 @@ void __init paging_init(void)
 		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION3_ENTRY_EMPTY;
 	}
-#else
-	asce_bits = _ASCE_TABLE_LENGTH;
-	pgd_type = _SEGMENT_ENTRY_EMPTY;
-#endif
 	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
 	clear_table((unsigned long *) init_mm.pgd, pgd_type,
 		    sizeof(unsigned long)*2048);
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 5535cfe0ee11..0f3604395805 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -36,10 +36,6 @@ void __init detect_memory_memblock(void)
 	memsize = rzm * rnmax;
 	if (!rzm)
 		rzm = 1ULL << 17;
-	if (IS_ENABLED(CONFIG_32BIT)) {
-		rzm = min(ADDR2G, rzm);
-		memsize = min(ADDR2G, memsize);
-	}
 	max_physmem_end = memsize;
 	addr = 0;
 	/* keep memblock lists close to the kernel */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 179a2c20b01f..2e8378796e87 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -190,29 +190,6 @@ unsigned long randomize_et_dyn(void)
 	return base + mmap_rnd();
 }
 
-#ifndef CONFIG_64BIT
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
-	/*
-	 * Fall back to the standard layout if the personality
-	 * bit is set, or if the expected stack growth is unlimited:
-	 */
-	if (mmap_is_legacy()) {
-		mm->mmap_base = mmap_base_legacy();
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base();
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
-#else
-
 int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 {
 	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
@@ -317,5 +294,3 @@ static int __init setup_mmap_rnd(void)
 	return 0;
 }
 early_initcall(setup_mmap_rnd);
-
-#endif
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 426c9d462d1c..749c98407b41 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
 {
 	int i;
 
-	if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
+	if (test_facility(13)) {
 		__ptep_ipte_range(address, nr - 1, pte);
 		return;
 	}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b2c1542f2ba2..33f589459113 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -27,14 +27,8 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-#ifndef CONFIG_64BIT
-#define ALLOC_ORDER 1
-#define FRAG_MASK 0x0f
-#else
 #define ALLOC_ORDER 2
 #define FRAG_MASK 0x03
-#endif
-
 
 unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
@@ -50,7 +44,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
 	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
 {
 	struct mm_struct *mm = arg;
@@ -140,7 +133,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 	if (current->active_mm == mm)
 		set_user_asce(mm);
 }
-#endif
 
 #ifdef CONFIG_PGSTE
 
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b1593c2f751a..ef7d6c8fea66 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
-#ifdef CONFIG_64BIT
 	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
 	return pud;
 }
 
@@ -51,12 +49,10 @@ static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
-#ifdef CONFIG_64BIT
 	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
 	return pmd;
 }
 
@@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pgd_populate(&init_mm, pg_dir, pu_dir);
 		}
 		pu_dir = pud_offset(pg_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
 		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
 		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
 			pud_val(*pu_dir) = __pa(address) |
@@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pud_populate(&init_mm, pu_dir, pm_dir);
 		}
 		pm_dir = pmd_offset(pu_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
 		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
 		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
 			pmd_val(*pm_dir) = __pa(address) |
@@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 
 		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir)) {
-#ifdef CONFIG_64BIT
 			/* Use 1MB frames for vmemmap if available. We always
 			 * use large frames even if they are only partially
 			 * used.
@@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 				address = (address + PMD_SIZE) & PMD_MASK;
 				continue;
 			}
-#endif
 			pt_dir = vmem_pte_alloc(address);
 			if (!pt_dir)
 				goto out;