path: root/arch/s390/mm/init.c
Diffstat (limited to 'arch/s390/mm/init.c')
-rw-r--r--  arch/s390/mm/init.c  20
1 file changed, 8 insertions, 12 deletions
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 4bb21be3b007..b3e7c45efb63 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -25,7 +25,7 @@
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
 #include <linux/poison.h>
-
+#include <linux/initrd.h>
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -95,20 +95,18 @@ static void __init setup_ro_region(void)
 	pte_t new_pte;
 	unsigned long address, end;
 
-	address = ((unsigned long)&__start_rodata) & PAGE_MASK;
-	end = PFN_ALIGN((unsigned long)&__end_rodata);
+	address = ((unsigned long)&_stext) & PAGE_MASK;
+	end = PFN_ALIGN((unsigned long)&_eshared);
 
 	for (; address < end; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
 		pmd = pmd_offset(pgd, address);
 		pte = pte_offset_kernel(pmd, address);
 		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
-		set_pte(pte, new_pte);
+		*pte = new_pte;
 	}
 }
 
-extern void vmem_map_init(void);
-
 /*
  * paging_init() sets up the page tables
  */
@@ -125,11 +123,11 @@ void __init paging_init(void)
 #ifdef CONFIG_64BIT
 	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
 	for (i = 0; i < PTRS_PER_PGD; i++)
-		pgd_clear(pg_dir + i);
+		pgd_clear_kernel(pg_dir + i);
 #else
 	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear((pmd_t *)(pg_dir + i));
+		pmd_clear_kernel((pmd_t *)(pg_dir + i));
 #endif
 	vmem_map_init();
 	setup_ro_region();
@@ -174,10 +172,8 @@ void __init mem_init(void)
 		datasize >>10,
 		initsize >> 10);
 	printk("Write protected kernel read-only data: %#lx - %#lx\n",
-	       (unsigned long)&__start_rodata,
-	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
-	printk("Virtual memmap size: %ldk\n",
-	       (max_pfn * sizeof(struct page)) >> 10);
+	       (unsigned long)&_stext,
+	       PFN_ALIGN((unsigned long)&_eshared) - 1);
 }
 
 void free_initmem(void)
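
Note: for orientation, below is a sketch of setup_ro_region() as it reads with this patch applied. Only the lines visible in the second hunk are taken from the diff; the declarations of pgd, pmd and pte ahead of the hunk context are assumed, since they are not shown above. The loop walks every page from _stext up to _eshared, resolves its PTE through the kernel page tables, and installs a read-only mapping built by mk_pte_phys() with _PAGE_RO.

/* Sketch only: setup_ro_region() after this patch. The pgd/pmd/pte
 * declarations are assumed; everything else mirrors the hunk above. */
static void __init setup_ro_region(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pte_t new_pte;
	unsigned long address, end;

	/* Write-protect the range from _stext up to _eshared. */
	address = ((unsigned long)&_stext) & PAGE_MASK;
	end = PFN_ALIGN((unsigned long)&_eshared);

	for (; address < end; address += PAGE_SIZE) {
		/* Resolve the PTE for this page in the kernel tables. */
		pgd = pgd_offset_k(address);
		pmd = pmd_offset(pgd, address);
		pte = pte_offset_kernel(pmd, address);
		/* Install a read-only mapping for the page. */
		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
		*pte = new_pte;
	}
}

Two functional changes are visible in that hunk: the write-protected range now runs from _stext to _eshared instead of only __start_rodata to __end_rodata, and the new PTE is stored with a plain assignment rather than through set_pte().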