path: root/arch/s390/mm
author	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 23:51:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 23:51:44 -0400
commit	bdfa54dfd9eea001274dbcd622657a904fe43b81 (patch)
tree	ab251ab359e519656d7061bbe8db4c7ab355404b /arch/s390/mm
parent	2481bc75283ea10e75d5fb1a8b42af363fc4b45c (diff)
parent	a1307bba1adcc9b338511180fa94a54b4c3f534b (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Martin Schwidefsky:
 "The major change in this merge is the removal of the support for
  31-bit kernels. Naturally 31-bit user space will continue to work
  via the compat layer.

  And then some cleanup, some improvements and bug fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (23 commits)
  s390/smp: wait until secondaries are active & online
  s390/hibernate: fix save and restore of kernel text section
  s390/cacheinfo: add missing facility check
  s390/syscalls: simplify syscall_get_arch()
  s390/irq: enforce correct irqclass_sub_desc array size
  s390: remove "64" suffix from mem64.S and swsusp_asm64.S
  s390/ipl: cleanup macro usage
  s390/ipl: cleanup shutdown_action attributes
  s390/ipl: cleanup bin attr usage
  s390/uprobes: fix address space annotation
  s390: add missing arch_release_task_struct() declaration
  s390: make couple of functions and variables static
  s390/maccess: improve s390_kernel_write()
  s390/maccess: remove potentially broken probe_kernel_write()
  s390/watchdog: support for KVM hypervisors and delete pr_info messages
  s390/watchdog: enable KEEPALIVE for /dev/watchdog
  s390/dasd: remove setting of scheduler from driver
  s390/traps: panic() instead of die() on translation exception
  s390: remove test_facility(2) (== z/Architecture mode active) checks
  s390/cmpxchg: simplify cmpxchg_double
  ...
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/dump_pagetables.c   24
-rw-r--r--  arch/s390/mm/extmem.c            14
-rw-r--r--  arch/s390/mm/fault.c             36
-rw-r--r--  arch/s390/mm/gup.c                4
-rw-r--r--  arch/s390/mm/init.c               5
-rw-r--r--  arch/s390/mm/maccess.c           70
-rw-r--r--  arch/s390/mm/mem_detect.c         4
-rw-r--r--  arch/s390/mm/mmap.c              32
-rw-r--r--  arch/s390/mm/pageattr.c           2
-rw-r--r--  arch/s390/mm/pgtable.c            8
-rw-r--r--  arch/s390/mm/vmem.c              10
11 files changed, 43 insertions, 166 deletions
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index d46cadeda204..8556d6be9b54 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,9 +18,7 @@ enum address_markers_idx {
 	KERNEL_END_NR,
 	VMEMMAP_NR,
 	VMALLOC_NR,
-#ifdef CONFIG_64BIT
 	MODULES_NR,
-#endif
 };
 
 static struct addr_marker address_markers[] = {
@@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = {
 	[KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
 	[VMEMMAP_NR] = {0, "vmemmap Area"},
 	[VMALLOC_NR] = {0, "vmalloc Area"},
-#ifdef CONFIG_64BIT
 	[MODULES_NR] = {0, "Modules Area"},
-#endif
 	{ -1, NULL }
 };
 
@@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
 	}
 }
 
-#ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
-#else
-#define _PMD_PROT_MASK 0
-#endif
-
 static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 			   pud_t *pud, unsigned long addr)
 {
@@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 	pmd = pmd_offset(pud, addr);
 	if (!pmd_none(*pmd)) {
 		if (pmd_large(*pmd)) {
-			prot = pmd_val(*pmd) & _PMD_PROT_MASK;
+			prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
 			note_page(m, st, prot, 3);
 		} else
 			walk_pte_level(m, st, pmd, addr);
@@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
 	}
 }
 
-#ifdef CONFIG_64BIT
-#define _PUD_PROT_MASK _REGION3_ENTRY_RO
-#else
-#define _PUD_PROT_MASK 0
-#endif
-
 static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 			   pgd_t *pgd, unsigned long addr)
 {
@@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
 	pud = pud_offset(pgd, addr);
 	if (!pud_none(*pud))
 		if (pud_large(*pud)) {
-			prot = pud_val(*pud) & _PUD_PROT_MASK;
+			prot = pud_val(*pud) & _REGION3_ENTRY_RO;
 			note_page(m, st, prot, 2);
 		} else
 			walk_pmd_level(m, st, pud, addr);
@@ -230,13 +214,9 @@ static int pt_dump_init(void)
 	 * kernel ASCE. We need this to keep the page table walker functions
 	 * from accessing non-existent entries.
 	 */
-#ifdef CONFIG_32BIT
-	max_addr = 1UL << 31;
-#else
 	max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
 	max_addr = 1UL << (max_addr * 11 + 31);
 	address_markers[MODULES_NR].start_address = MODULES_VADDR;
-#endif
 	address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
 	address_markers[VMALLOC_NR].start_address = VMALLOC_START;
 	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 519bba716cc3..23c496957c22 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -51,7 +51,6 @@ struct qout64 {
 	struct qrange range[6];
 };
 
-#ifdef CONFIG_64BIT
 struct qrange_old {
 	unsigned int start; /* last byte type */
 	unsigned int end;   /* last byte reserved */
@@ -65,7 +64,6 @@ struct qout64_old {
 	int segrcnt;
 	struct qrange_old range[6];
 };
-#endif
 
 struct qin64 {
 	char qopcode;
@@ -103,7 +101,6 @@ static int scode_set;
 static int
 dcss_set_subcodes(void)
 {
-#ifdef CONFIG_64BIT
 	char *name = kmalloc(8 * sizeof(char), GFP_KERNEL | GFP_DMA);
 	unsigned long rx, ry;
 	int rc;
@@ -135,7 +132,6 @@ dcss_set_subcodes(void)
 		segext_scode = DCSS_SEGEXTX;
 		return 0;
 	}
-#endif
 	/* Diag x'64' new subcodes are not supported, set to old subcodes */
 	loadshr_scode = DCSS_LOADNOLY;
 	loadnsr_scode = DCSS_LOADNSR;
@@ -208,7 +204,6 @@ dcss_diag(int *func, void *parameter,
 	rx = (unsigned long) parameter;
 	ry = (unsigned long) *func;
 
-#ifdef CONFIG_64BIT
 	/* 64-bit Diag x'64' new subcode, keep in 64-bit addressing mode */
 	if (*func > DCSS_SEGEXT)
 		asm volatile(
@@ -225,13 +220,6 @@ dcss_diag(int *func, void *parameter,
 			" ipm %2\n"
 			" srl %2,28\n"
 			: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#else
-	asm volatile(
-		" diag %0,%1,0x64\n"
-		" ipm %2\n"
-		" srl %2,28\n"
-		: "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
-#endif
 	*ret1 = rx;
 	*ret2 = ry;
 	return rc;
@@ -281,7 +269,6 @@ query_segment_type (struct dcss_segment *seg)
 		goto out_free;
 	}
 
-#ifdef CONFIG_64BIT
 	/* Only old format of output area of Diagnose x'64' is supported,
 	   copy data for the new format. */
 	if (segext_scode == DCSS_SEGEXT) {
@@ -307,7 +294,6 @@ query_segment_type (struct dcss_segment *seg)
 		}
 		kfree(qout_old);
 	}
-#endif
 	if (qout->segcnt > 6) {
 		rc = -EOPNOTSUPP;
 		goto out_free;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 3ff86533f7db..76515bcea2f1 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -36,15 +36,9 @@
 #include <asm/facility.h>
 #include "../kernel/entry.h"
 
-#ifndef CONFIG_64BIT
-#define __FAIL_ADDR_MASK 0x7ffff000
-#define __SUBCODE_MASK 0x0200
-#define __PF_RES_FIELD 0ULL
-#else /* CONFIG_64BIT */
 #define __FAIL_ADDR_MASK -4096L
 #define __SUBCODE_MASK 0x0600
 #define __PF_RES_FIELD 0x8000000000000000ULL
-#endif /* CONFIG_64BIT */
 
 #define VM_FAULT_BADCONTEXT 0x010000
 #define VM_FAULT_BADMAP 0x020000
@@ -54,7 +48,6 @@
 
 static unsigned long store_indication __read_mostly;
 
-#ifdef CONFIG_64BIT
 static int __init fault_init(void)
 {
 	if (test_facility(75))
@@ -62,7 +55,6 @@ static int __init fault_init(void)
 	return 0;
 }
 early_initcall(fault_init);
-#endif
 
 static inline int notify_page_fault(struct pt_regs *regs)
 {
@@ -133,7 +125,6 @@ static int bad_address(void *p)
 	return probe_kernel_address((unsigned long *)p, dummy);
 }
 
-#ifdef CONFIG_64BIT
 static void dump_pagetable(unsigned long asce, unsigned long address)
 {
 	unsigned long *table = __va(asce & PAGE_MASK);
@@ -187,33 +178,6 @@ bad:
 	pr_cont("BAD\n");
 }
 
-#else /* CONFIG_64BIT */
-
-static void dump_pagetable(unsigned long asce, unsigned long address)
-{
-	unsigned long *table = __va(asce & PAGE_MASK);
-
-	pr_alert("AS:%08lx ", asce);
-	table = table + ((address >> 20) & 0x7ff);
-	if (bad_address(table))
-		goto bad;
-	pr_cont("S:%08lx ", *table);
-	if (*table & _SEGMENT_ENTRY_INVALID)
-		goto out;
-	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
-	table = table + ((address >> 12) & 0xff);
-	if (bad_address(table))
-		goto bad;
-	pr_cont("P:%08lx ", *table);
-out:
-	pr_cont("\n");
-	return;
-bad:
-	pr_cont("BAD\n");
-}
-
-#endif /* CONFIG_64BIT */
-
 static void dump_fault_info(struct pt_regs *regs)
 {
 	unsigned long asce;
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 5c586c78ca8d..1eb41bb3010c 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -106,11 +106,9 @@ static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
 	pmd_t *pmdp, pmd;
 
 	pmdp = (pmd_t *) pudp;
-#ifdef CONFIG_64BIT
 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
 		pmdp = (pmd_t *) pud_deref(pud);
 	pmdp += pmd_index(addr);
-#endif
 	do {
 		pmd = *pmdp;
 		barrier();
@@ -145,11 +143,9 @@ static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
 	pud_t *pudp, pud;
 
 	pudp = (pud_t *) pgdp;
-#ifdef CONFIG_64BIT
 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
 		pudp = (pud_t *) pgd_deref(pgd);
 	pudp += pud_index(addr);
-#endif
 	do {
 		pud = *pudp;
 		barrier();
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d35b15113b17..80875c43a4a4 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -105,7 +105,6 @@ void __init paging_init(void)
 	unsigned long pgd_type, asce_bits;
 
 	init_mm.pgd = swapper_pg_dir;
-#ifdef CONFIG_64BIT
 	if (VMALLOC_END > (1UL << 42)) {
 		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION2_ENTRY_EMPTY;
@@ -113,10 +112,6 @@ void __init paging_init(void)
 		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 		pgd_type = _REGION3_ENTRY_EMPTY;
 	}
-#else
-	asce_bits = _ASCE_TABLE_LENGTH;
-	pgd_type = _SEGMENT_ENTRY_EMPTY;
-#endif
 	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
 	clear_table((unsigned long *) init_mm.pgd, pgd_type,
 		    sizeof(unsigned long)*2048);
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 2eb34bdfc613..8a993a53fcd6 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -1,7 +1,7 @@
 /*
  * Access kernel memory without faulting -- s390 specific implementation.
  *
- * Copyright IBM Corp. 2009
+ * Copyright IBM Corp. 2009, 2015
  *
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
  *
@@ -16,51 +16,55 @@
 #include <asm/ctl_reg.h>
 #include <asm/io.h>
 
-/*
- * This function writes to kernel memory bypassing DAT and possible
- * write protection. It copies one to four bytes from src to dst
- * using the stura instruction.
- * Returns the number of bytes copied or -EFAULT.
- */
-static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
+static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
 {
-	unsigned long count, aligned;
-	int offset, mask;
-	int rc = -EFAULT;
+	unsigned long aligned, offset, count;
+	char tmp[8];
 
-	aligned = (unsigned long) dst & ~3UL;
-	offset = (unsigned long) dst & 3;
-	count = min_t(unsigned long, 4 - offset, size);
-	mask = (0xf << (4 - count)) & 0xf;
-	mask >>= offset;
+	aligned = (unsigned long) dst & ~7UL;
+	offset = (unsigned long) dst & 7UL;
+	size = min(8UL - offset, size);
+	count = size - 1;
 	asm volatile(
 		" bras 1,0f\n"
-		" icm 0,0,0(%3)\n"
-		"0: l 0,0(%1)\n"
-		" lra %1,0(%1)\n"
-		"1: ex %2,0(1)\n"
-		"2: stura 0,%1\n"
-		" la %0,0\n"
-		"3:\n"
-		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
-		: "+d" (rc), "+a" (aligned)
-		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
-	return rc ? rc : count;
+		" mvc 0(1,%4),0(%5)\n"
+		"0: mvc 0(8,%3),0(%0)\n"
+		" ex %1,0(1)\n"
+		" lg %1,0(%3)\n"
+		" lra %0,0(%0)\n"
+		" sturg %1,%0\n"
+		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
+		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
+		: "cc", "memory", "1");
+	return size;
 }
 
-long probe_kernel_write(void *dst, const void *src, size_t size)
+/*
+ * s390_kernel_write - write to kernel memory bypassing DAT
+ * @dst: destination address
+ * @src: source address
+ * @size: number of bytes to copy
+ *
+ * This function writes to kernel memory bypassing DAT and possible page table
+ * write protection. It writes to the destination using the sturg instruction.
+ * Therefore we have a read-modify-write sequence: the function reads eight
+ * bytes from destination at an eight byte boundary, modifies the bytes
+ * requested and writes the result back in a loop.
+ *
+ * Note: this means that this function may not be called concurrently on
+ * several cpus with overlapping words, since this may potentially
+ * cause data corruption.
+ */
+void notrace s390_kernel_write(void *dst, const void *src, size_t size)
 {
-	long copied = 0;
+	long copied;
 
 	while (size) {
-		copied = probe_kernel_write_odd(dst, src, size);
-		if (copied < 0)
-			break;
+		copied = s390_kernel_write_odd(dst, src, size);
 		dst += copied;
 		src += copied;
 		size -= copied;
 	}
-	return copied < 0 ? -EFAULT : 0;
 }
 
 static int __memcpy_real(void *dest, void *src, size_t count)
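
The new kernel-doc comment above spells out the sturg-based read-modify-write contract. As a purely illustrative sketch (not part of this commit), a caller of the new interface might look like the following; the header location, helper name and serializing mutex are assumptions for illustration only:

	/*
	 * Hypothetical caller sketch: update a possibly write-protected kernel
	 * location. s390_kernel_write() bypasses DAT, so the page need not be
	 * made writable first, but the caller must ensure no other CPU writes
	 * an overlapping doubleword concurrently (see the comment above).
	 */
	#include <linux/mutex.h>
	#include <asm/uaccess.h>	/* assumed home of the s390_kernel_write() prototype */

	static DEFINE_MUTEX(patch_mutex);	/* hypothetical serialization lock */

	static void patch_kernel_data(void *dst, const void *src, size_t len)
	{
		mutex_lock(&patch_mutex);
		s390_kernel_write(dst, src, len);	/* returns void; no -EFAULT handling */
		mutex_unlock(&patch_mutex);
	}

Unlike the removed probe_kernel_write(), the new helper cannot report a partial failure, which is why the error handling in the copy loop above went away.
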
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 5535cfe0ee11..0f3604395805 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -36,10 +36,6 @@ void __init detect_memory_memblock(void)
 	memsize = rzm * rnmax;
 	if (!rzm)
 		rzm = 1ULL << 17;
-	if (IS_ENABLED(CONFIG_32BIT)) {
-		rzm = min(ADDR2G, rzm);
-		memsize = min(ADDR2G, memsize);
-	}
 	max_physmem_end = memsize;
 	addr = 0;
 	/* keep memblock lists close to the kernel */
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index bb3367c5cb0b..6e552af08c76 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -32,7 +32,7 @@
 #include <asm/pgalloc.h>
 
 unsigned long mmap_rnd_mask;
-unsigned long mmap_align_mask;
+static unsigned long mmap_align_mask;
 
 static unsigned long stack_maxrandom_size(void)
 {
@@ -177,34 +177,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	return addr;
 }
 
-#ifndef CONFIG_64BIT
-
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm)
-{
-	unsigned long random_factor = 0UL;
-
-	if (current->flags & PF_RANDOMIZE)
-		random_factor = arch_mmap_rnd();
-
-	/*
-	 * Fall back to the standard layout if the personality
-	 * bit is set, or if the expected stack growth is unlimited:
-	 */
-	if (mmap_is_legacy()) {
-		mm->mmap_base = mmap_base_legacy(random_factor);
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base(random_factor);
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
-}
-
-#else
-
 int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 {
 	if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
@@ -314,5 +286,3 @@ static int __init setup_mmap_rnd(void)
 	return 0;
 }
 early_initcall(setup_mmap_rnd);
-
-#endif
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 426c9d462d1c..749c98407b41 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -109,7 +109,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
 {
 	int i;
 
-	if (test_facility(13) && IS_ENABLED(CONFIG_64BIT)) {
+	if (test_facility(13)) {
 		__ptep_ipte_range(address, nr - 1, pte);
 		return;
 	}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b2c1542f2ba2..33f589459113 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -27,14 +27,8 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-#ifndef CONFIG_64BIT
-#define ALLOC_ORDER 1
-#define FRAG_MASK 0x0f
-#else
 #define ALLOC_ORDER 2
 #define FRAG_MASK 0x03
-#endif
-
 
 unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
@@ -50,7 +44,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
 	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-#ifdef CONFIG_64BIT
 static void __crst_table_upgrade(void *arg)
 {
 	struct mm_struct *mm = arg;
@@ -140,7 +133,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 	if (current->active_mm == mm)
 		set_user_asce(mm);
 }
-#endif
 
 #ifdef CONFIG_PGSTE
 
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b1593c2f751a..ef7d6c8fea66 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -38,12 +38,10 @@ static inline pud_t *vmem_pud_alloc(void)
 {
 	pud_t *pud = NULL;
 
-#ifdef CONFIG_64BIT
 	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
 	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
 	return pud;
 }
 
@@ -51,12 +49,10 @@ static inline pmd_t *vmem_pmd_alloc(void)
 {
 	pmd_t *pmd = NULL;
 
-#ifdef CONFIG_64BIT
 	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
 	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
-#endif
 	return pmd;
 }
 
@@ -98,7 +94,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pgd_populate(&init_mm, pg_dir, pu_dir);
 		}
 		pu_dir = pud_offset(pg_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
 		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
 		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
 			pud_val(*pu_dir) = __pa(address) |
@@ -115,7 +111,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 			pud_populate(&init_mm, pu_dir, pm_dir);
 		}
 		pm_dir = pmd_offset(pu_dir, address);
-#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+#ifndef CONFIG_DEBUG_PAGEALLOC
 		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
 		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
 			pmd_val(*pm_dir) = __pa(address) |
@@ -222,7 +218,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 
 		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir)) {
-#ifdef CONFIG_64BIT
 			/* Use 1MB frames for vmemmap if available. We always
 			 * use large frames even if they are only partially
 			 * used.
@@ -240,7 +235,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 				address = (address + PMD_SIZE) & PMD_MASK;
 				continue;
 			}
-#endif
 			pt_dir = vmem_pte_alloc(address);
 			if (!pt_dir)
 				goto out;