author		Heiko Carstens <heiko.carstens@de.ibm.com>	2006-12-08 09:56:07 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-12-08 09:56:07 -0500
commit		f4eb07c17df2e6cf9bd58bfcd9cc9e05e9489d07 (patch)
tree		c1b4b422d3b8183edf452cc745dadd0fe129018b /arch/s390/mm/init.c
parent		7f090145a14afc35844dce80174c9c24f9e66ec5 (diff)
[S390] Virtual memmap for s390.
Virtual memmap support for s390, inspired by the ia64 implementation.

Unlike ia64 we need a mechanism which allows us to dynamically attach
shared memory regions. These memory regions are accessed via the dcss
device driver. dcss implements the 'direct_access' operation, which
requires struct pages for every single shared page. Therefore this
implementation provides an interface to attach/detach shared memory:

	int add_shared_memory(unsigned long start, unsigned long size);
	int remove_shared_memory(unsigned long start, unsigned long size);

The purpose of the add_shared_memory function is to add the given
memory range to the 1:1 mapping and to make sure that the
corresponding range in the vmemmap is backed with physical pages.
It also initialises the new struct pages.

remove_shared_memory in turn only invalidates the page table entries
in the 1:1 mapping. The page tables and the memory used for struct
pages in the vmemmap are currently not freed. They will be reused
when the next segment is attached. Given that the maximum size of a
shared memory region is 2GB and in addition all regions must reside
below 2GB, this is not too much of a restriction, but there is room
for improvement.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
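For illustration, a minimal sketch of how a caller such as the dcss
driver might use this interface. Only the two prototypes come from the
commit message; the wrapper function, the segment address, and the size
below are hypothetical, not code from this patch.

/*
 * Hypothetical usage sketch -- not part of this patch.  The address,
 * size and function name are made up for illustration; only the
 * add_shared_memory/remove_shared_memory prototypes are from the
 * commit message.
 */
int add_shared_memory(unsigned long start, unsigned long size);
int remove_shared_memory(unsigned long start, unsigned long size);

static int example_attach_segment(void)
{
	unsigned long start = 0x20000000UL;	/* hypothetical: below 2GB */
	unsigned long size = 0x01000000UL;	/* hypothetical: 16MB segment */
	int rc;

	/*
	 * Add the range to the kernel 1:1 mapping and back the
	 * corresponding vmemmap range with physical pages; the new
	 * struct pages are initialised as well.
	 */
	rc = add_shared_memory(start, size);
	if (rc)
		return rc;

	/* ... access the segment, e.g. via the dcss 'direct_access' op ... */

	/*
	 * Only invalidates the page table entries in the 1:1 mapping.
	 * Page tables and vmemmap backing are kept and reused when the
	 * next segment is attached.
	 */
	remove_shared_memory(start, size);
	return 0;
}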
Diffstat (limited to 'arch/s390/mm/init.c')
-rw-r--r--	arch/s390/mm/init.c	163
1 file changed, 41 insertions(+), 122 deletions(-)
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index e1881c31b1cb..5ea12a573cad 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,8 @@ void show_mem(void)
69 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 69 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
70 i = max_mapnr; 70 i = max_mapnr;
71 while (i-- > 0) { 71 while (i-- > 0) {
72 if (!pfn_valid(i))
73 continue;
72 page = pfn_to_page(i); 74 page = pfn_to_page(i);
73 total++; 75 total++;
74 if (PageReserved(page)) 76 if (PageReserved(page))
@@ -84,67 +86,53 @@ void show_mem(void)
84 printk("%d pages swap cached\n",cached); 86 printk("%d pages swap cached\n",cached);
85} 87}
86 88
89static void __init setup_ro_region(void)
90{
91 pgd_t *pgd;
92 pmd_t *pmd;
93 pte_t *pte;
94 pte_t new_pte;
95 unsigned long address, end;
96
97 address = ((unsigned long)&__start_rodata) & PAGE_MASK;
98 end = PFN_ALIGN((unsigned long)&__end_rodata);
99
100 for (; address < end; address += PAGE_SIZE) {
101 pgd = pgd_offset_k(address);
102 pmd = pmd_offset(pgd, address);
103 pte = pte_offset_kernel(pmd, address);
104 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
105 set_pte(pte, new_pte);
106 }
107}
108
87extern unsigned long __initdata zholes_size[]; 109extern unsigned long __initdata zholes_size[];
110extern void vmem_map_init(void);
88/* 111/*
89 * paging_init() sets up the page tables 112 * paging_init() sets up the page tables
90 */ 113 */
91
92#ifndef CONFIG_64BIT
93void __init paging_init(void) 114void __init paging_init(void)
94{ 115{
95 pgd_t * pg_dir; 116 pgd_t *pg_dir;
96 pte_t * pg_table; 117 int i;
97 pte_t pte; 118 unsigned long pgdir_k;
98 int i; 119 static const int ssm_mask = 0x04000000L;
99 unsigned long tmp;
100 unsigned long pfn = 0;
101 unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
102 static const int ssm_mask = 0x04000000L;
103 unsigned long ro_start_pfn, ro_end_pfn;
104 unsigned long zones_size[MAX_NR_ZONES]; 120 unsigned long zones_size[MAX_NR_ZONES];
121 unsigned long dma_pfn, high_pfn;
105 122
106 ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata); 123 pg_dir = swapper_pg_dir;
107 ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
108
109 memset(zones_size, 0, sizeof(zones_size));
110 zones_size[ZONE_DMA] = max_low_pfn;
111 free_area_init_node(0, &contig_page_data, zones_size,
112 __pa(PAGE_OFFSET) >> PAGE_SHIFT,
113 zholes_size);
114
115 /* unmap whole virtual address space */
116 124
117 pg_dir = swapper_pg_dir; 125#ifdef CONFIG_64BIT
118 126 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
119 for (i = 0; i < PTRS_PER_PGD; i++) 127 for (i = 0; i < PTRS_PER_PGD; i++)
120 pmd_clear((pmd_t *) pg_dir++); 128 pgd_clear(pg_dir + i);
121 129#else
122 /* 130 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
123 * map whole physical memory to virtual memory (identity mapping) 131 for (i = 0; i < PTRS_PER_PGD; i++)
124 */ 132 pmd_clear((pmd_t *)(pg_dir + i));
125 133#endif
126 pg_dir = swapper_pg_dir; 134 vmem_map_init();
127 135 setup_ro_region();
128 while (pfn < max_low_pfn) {
129 /*
130 * pg_table is physical at this point
131 */
132 pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
133
134 pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
135 pg_dir++;
136
137 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
138 if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
139 pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
140 else
141 pte = pfn_pte(pfn, PAGE_KERNEL);
142 if (pfn >= max_low_pfn)
143 pte_val(pte) = _PAGE_TYPE_EMPTY;
144 set_pte(pg_table, pte);
145 pfn++;
146 }
147 }
148 136
149 S390_lowcore.kernel_asce = pgdir_k; 137 S390_lowcore.kernel_asce = pgdir_k;
150 138
@@ -154,31 +142,9 @@ void __init paging_init(void)
 	__ctl_load(pgdir_k, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
-	local_flush_tlb();
-}
-
-#else /* CONFIG_64BIT */
-
-void __init paging_init(void)
-{
-	pgd_t * pg_dir;
-	pmd_t * pm_dir;
-	pte_t * pt_dir;
-	pte_t pte;
-	int i,j,k;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
-		_KERN_REGION_TABLE;
-	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
-	unsigned long ro_start_pfn, ro_end_pfn;
-
 	memset(zones_size, 0, sizeof(zones_size));
 	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
 	high_pfn = max_low_pfn;
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
 	if (dma_pfn > high_pfn)
 		zones_size[ZONE_DMA] = high_pfn;
@@ -190,56 +156,7 @@ void __init paging_init(void)
 	/* Initialize mem_map[]. */
 	free_area_init_node(0, &contig_page_data, zones_size,
 			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
-
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
-
-		if (pfn >= max_low_pfn) {
-			pgd_clear(pg_dir);
-			continue;
-		}
-
-		pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
-		pgd_populate(&init_mm, pg_dir, pm_dir);
-
-		for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
-			if (pfn >= max_low_pfn) {
-				pmd_clear(pm_dir);
-				continue;
-			}
-
-			pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
-
-			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-				else
-					pte = pfn_pte(pfn, PAGE_KERNEL);
-				if (pfn >= max_low_pfn)
-					pte_val(pte) = _PAGE_TYPE_EMPTY;
-				set_pte(pt_dir, pte);
-				pfn++;
-			}
-		}
-	}
-
-	S390_lowcore.kernel_asce = pgdir_k;
-
-	/* enable virtual mapping in kernel mode */
-	__ctl_load(pgdir_k, 1, 1);
-	__ctl_load(pgdir_k, 7, 7);
-	__ctl_load(pgdir_k, 13, 13);
-	__raw_local_irq_ssm(ssm_mask);
-
-	local_flush_tlb();
 }
-#endif /* CONFIG_64BIT */
 
 void __init mem_init(void)
 {
@@ -269,6 +186,8 @@ void __init mem_init(void)
269 printk("Write protected kernel read-only data: %#lx - %#lx\n", 186 printk("Write protected kernel read-only data: %#lx - %#lx\n",
270 (unsigned long)&__start_rodata, 187 (unsigned long)&__start_rodata,
271 PFN_ALIGN((unsigned long)&__end_rodata) - 1); 188 PFN_ALIGN((unsigned long)&__end_rodata) - 1);
189 printk("Virtual memmap size: %ldk\n",
190 (max_pfn * sizeof(struct page)) >> 10);
272} 191}
273 192
274void free_initmem(void) 193void free_initmem(void)