aboutsummaryrefslogtreecommitdiffstats
path: root/arch/s390/mm/dump_pagetables.c
diff options
context:
space:
mode:
authorHeiko Carstens <heiko.carstens@de.ibm.com>2015-02-12 07:08:27 -0500
committerMartin Schwidefsky <schwidefsky@de.ibm.com>2015-03-25 06:49:33 -0400
commit5a79859ae0f35d25c67a03e82bf0c80592f16a39 (patch)
tree37264d49f069812f19ced94e6ae171814fb7e498 /arch/s390/mm/dump_pagetables.c
parent1833c9f647e9bda1cd24653ff8f9c207b5f5b911 (diff)
s390: remove 31 bit support
Remove the 31 bit support in order to reduce maintenance cost and effectively remove dead code. For a couple of years there has been no distribution left that comes with a 31 bit kernel. The 31 bit kernel had also been broken for more than a year before anybody noticed. In addition I added a removal warning to the kernel shown at ipl for 5 minutes: a960062e5826 ("s390: add 31 bit warning message") which let everybody know about the plan to remove 31 bit code. We didn't get any response. Given that the last 31 bit only machine was introduced in 1999, let's remove the code. Anybody with 31 bit user space code can still use the compat mode. Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/mm/dump_pagetables.c')
-rw-r--r--arch/s390/mm/dump_pagetables.c24
1 file changed, 2 insertions, 22 deletions
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index d46cadeda204..8556d6be9b54 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -18,9 +18,7 @@ enum address_markers_idx {
18 KERNEL_END_NR, 18 KERNEL_END_NR,
19 VMEMMAP_NR, 19 VMEMMAP_NR,
20 VMALLOC_NR, 20 VMALLOC_NR,
21#ifdef CONFIG_64BIT
22 MODULES_NR, 21 MODULES_NR,
23#endif
24}; 22};
25 23
26static struct addr_marker address_markers[] = { 24static struct addr_marker address_markers[] = {
@@ -29,9 +27,7 @@ static struct addr_marker address_markers[] = {
29 [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"}, 27 [KERNEL_END_NR] = {(unsigned long)&_end, "Kernel Image End"},
30 [VMEMMAP_NR] = {0, "vmemmap Area"}, 28 [VMEMMAP_NR] = {0, "vmemmap Area"},
31 [VMALLOC_NR] = {0, "vmalloc Area"}, 29 [VMALLOC_NR] = {0, "vmalloc Area"},
32#ifdef CONFIG_64BIT
33 [MODULES_NR] = {0, "Modules Area"}, 30 [MODULES_NR] = {0, "Modules Area"},
34#endif
35 { -1, NULL } 31 { -1, NULL }
36}; 32};
37 33
@@ -127,12 +123,6 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
127 } 123 }
128} 124}
129 125
130#ifdef CONFIG_64BIT
131#define _PMD_PROT_MASK _SEGMENT_ENTRY_PROTECT
132#else
133#define _PMD_PROT_MASK 0
134#endif
135
136static void walk_pmd_level(struct seq_file *m, struct pg_state *st, 126static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
137 pud_t *pud, unsigned long addr) 127 pud_t *pud, unsigned long addr)
138{ 128{
@@ -145,7 +135,7 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
145 pmd = pmd_offset(pud, addr); 135 pmd = pmd_offset(pud, addr);
146 if (!pmd_none(*pmd)) { 136 if (!pmd_none(*pmd)) {
147 if (pmd_large(*pmd)) { 137 if (pmd_large(*pmd)) {
148 prot = pmd_val(*pmd) & _PMD_PROT_MASK; 138 prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
149 note_page(m, st, prot, 3); 139 note_page(m, st, prot, 3);
150 } else 140 } else
151 walk_pte_level(m, st, pmd, addr); 141 walk_pte_level(m, st, pmd, addr);
@@ -155,12 +145,6 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
155 } 145 }
156} 146}
157 147
158#ifdef CONFIG_64BIT
159#define _PUD_PROT_MASK _REGION3_ENTRY_RO
160#else
161#define _PUD_PROT_MASK 0
162#endif
163
164static void walk_pud_level(struct seq_file *m, struct pg_state *st, 148static void walk_pud_level(struct seq_file *m, struct pg_state *st,
165 pgd_t *pgd, unsigned long addr) 149 pgd_t *pgd, unsigned long addr)
166{ 150{
@@ -173,7 +157,7 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
173 pud = pud_offset(pgd, addr); 157 pud = pud_offset(pgd, addr);
174 if (!pud_none(*pud)) 158 if (!pud_none(*pud))
175 if (pud_large(*pud)) { 159 if (pud_large(*pud)) {
176 prot = pud_val(*pud) & _PUD_PROT_MASK; 160 prot = pud_val(*pud) & _REGION3_ENTRY_RO;
177 note_page(m, st, prot, 2); 161 note_page(m, st, prot, 2);
178 } else 162 } else
179 walk_pmd_level(m, st, pud, addr); 163 walk_pmd_level(m, st, pud, addr);
@@ -230,13 +214,9 @@ static int pt_dump_init(void)
230 * kernel ASCE. We need this to keep the page table walker functions 214 * kernel ASCE. We need this to keep the page table walker functions
231 * from accessing non-existent entries. 215 * from accessing non-existent entries.
232 */ 216 */
233#ifdef CONFIG_32BIT
234 max_addr = 1UL << 31;
235#else
236 max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; 217 max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
237 max_addr = 1UL << (max_addr * 11 + 31); 218 max_addr = 1UL << (max_addr * 11 + 31);
238 address_markers[MODULES_NR].start_address = MODULES_VADDR; 219 address_markers[MODULES_NR].start_address = MODULES_VADDR;
239#endif
240 address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap; 220 address_markers[VMEMMAP_NR].start_address = (unsigned long) vmemmap;
241 address_markers[VMALLOC_NR].start_address = VMALLOC_START; 221 address_markers[VMALLOC_NR].start_address = VMALLOC_START;
242 debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops); 222 debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);