Diffstat (limited to 'arch/s390/mm/vmem.c')
-rw-r--r--  arch/s390/mm/vmem.c | 135
1 file changed, 53 insertions(+), 82 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 35d90a4720fd..beccacf907f3 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -10,10 +10,12 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/list.h>
+#include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 static DEFINE_MUTEX(vmem_mutex);
 
@@ -25,43 +27,6 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
-			   unsigned long start_pfn)
-{
-	struct page *start, *end;
-	struct page *map_start, *map_end;
-	int i;
-
-	start = pfn_to_page(start_pfn);
-	end = start + size;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long cstart, cend;
-
-		cstart = PFN_DOWN(memory_chunk[i].addr);
-		cend = cstart + PFN_DOWN(memory_chunk[i].size);
-
-		map_start = mem_map + cstart;
-		map_end = mem_map + cend;
-
-		if (map_start < start)
-			map_start = start;
-		if (map_end > end)
-			map_end = end;
-
-		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
-				/ sizeof(struct page);
-		map_end += ((PFN_ALIGN((unsigned long) map_end)
-				- (unsigned long) map_end)
-				/ sizeof(struct page));
-
-		if (map_start < map_end)
-			memmap_init_zone((unsigned long)(map_end - map_start),
-					 nid, zone, page_to_pfn(map_start),
-					 MEMMAP_EARLY);
-	}
-}
-
 static void __ref *vmem_alloc_pages(unsigned int order)
 {
 	if (slab_is_available())
@@ -77,8 +42,7 @@ static inline pud_t *vmem_pud_alloc(void)
 	pud = vmem_alloc_pages(2);
 	if (!pud)
 		return NULL;
-	pud_val(*pud) = _REGION3_ENTRY_EMPTY;
-	memcpy(pud + 1, pud, (PTRS_PER_PUD - 1)*sizeof(pud_t));
+	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
 #endif
 	return pud;
 }
@@ -91,7 +55,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
 	pmd = vmem_alloc_pages(2);
 	if (!pmd)
 		return NULL;
-	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
+	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
 #endif
 	return pmd;
 }
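
Both allocation helpers now initialize their freshly allocated order-2 (4-page) table the same way, replacing the old write-one-entry-then-memcpy idiom in vmem_pud_alloc(). clear_table() is an s390 helper from asm/pgalloc.h; conceptually it just stamps the empty-entry pattern into every 8-byte slot. A rough sketch of the idea (illustrative only; the in-tree helper is architecture-optimized):

	/* Illustrative equivalent of clear_table(): fill `size` bytes of
	 * a page-table region with the given empty-entry value, one
	 * entry per unsigned long.  Sketch, not the real implementation. */
	static inline void clear_table_sketch(unsigned long *table,
					      unsigned long entry,
					      unsigned long size)
	{
		unsigned long *end = table + size / sizeof(unsigned long);

		while (table < end)
			*table++ = entry;
	}
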
@@ -114,7 +78,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_range(unsigned long start, unsigned long size)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
 	unsigned long address;
 	pgd_t *pg_dir;
@@ -141,7 +105,19 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
 		}
 
+		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
 		pm_dir = pmd_offset(pu_dir, address);
+
+#ifdef __s390x__
+		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
+		    (address + HPAGE_SIZE <= start + size) &&
+		    (address >= HPAGE_SIZE)) {
+			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+			pmd_val(*pm_dir) = pte_val(pte);
+			address += HPAGE_SIZE - PAGE_SIZE;
+			continue;
+		}
+#endif
 		if (pmd_none(*pm_dir)) {
 			pt_dir = vmem_pte_alloc();
 			if (!pt_dir)
@@ -150,7 +126,6 @@ static int vmem_add_range(unsigned long start, unsigned long size)
 		}
 
 		pt_dir = pte_offset_kernel(pm_dir, address);
-		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
 		*pt_dir = pte;
 	}
 	ret = 0;
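
The new #ifdef __s390x__ block above maps a whole segment with a single large pmd entry when three conditions hold. Pulled out as a standalone predicate (a hypothetical helper, not part of the patch), the test reads:

	/* Hypothetical restatement of the large-page test in
	 * vmem_add_mem(): a large segment entry can be used when the
	 * machine supports large pages, the address is segment aligned,
	 * the whole segment lies inside the range being mapped, and we
	 * are past the very first segment. */
	static inline int use_large_pmd(unsigned long address,
					unsigned long start,
					unsigned long size)
	{
		return MACHINE_HAS_HPAGE &&
		       !(address & ~HPAGE_MASK) &&
		       address + HPAGE_SIZE <= start + size &&
		       address >= HPAGE_SIZE;
	}

When the test succeeds, the loop advances by HPAGE_SIZE - PAGE_SIZE and continues, because the enclosing for loop still adds PAGE_SIZE per iteration.
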
@@ -181,6 +156,13 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 		pm_dir = pmd_offset(pu_dir, address);
 		if (pmd_none(*pm_dir))
 			continue;
+
+		if (pmd_huge(*pm_dir)) {
+			pmd_clear_kernel(pm_dir);
+			address += HPAGE_SIZE - PAGE_SIZE;
+			continue;
+		}
+
 		pt_dir = pte_offset_kernel(pm_dir, address);
 		*pt_dir = pte;
 	}
@@ -190,10 +172,9 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-static int vmem_add_mem_map(unsigned long start, unsigned long size)
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 {
 	unsigned long address, start_addr, end_addr;
-	struct page *map_start, *map_end;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -201,11 +182,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = VMEM_MAP + PFN_DOWN(start);
-	map_end = VMEM_MAP + PFN_DOWN(start + size);
-
-	start_addr = (unsigned long) map_start & PAGE_MASK;
-	end_addr = PFN_ALIGN((unsigned long) map_end);
+	start_addr = (unsigned long) start;
+	end_addr = (unsigned long) (start + nr);
 
 	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
 		pg_dir = pgd_offset_k(address);
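
With this rename the former vmem_add_mem_map() becomes the architecture hook for the generic sparse-vmemmap code, which hands it a struct page range directly instead of a physical address range. For context, the generic caller in mm/sparse-vmemmap.c of this era invokes the hook once per memory section, approximately like this (reproduced from memory, so treat the details as illustrative):

	/* Approximate shape of the generic caller: populate the virtual
	 * memmap for one sparsemem section, return its struct page map. */
	struct page * __meminit sparse_mem_map_populate(unsigned long pnum,
							int nid)
	{
		struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);

		if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
			return NULL;
		return map;
	}
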
@@ -249,16 +227,6 @@ out:
 	return ret;
 }
 
-static int vmem_add_mem(unsigned long start, unsigned long size)
-{
-	int ret;
-
-	ret = vmem_add_mem_map(start, size);
-	if (ret)
-		return ret;
-	return vmem_add_range(start, size);
-}
-
 /*
  * Add memory segment to the segment list if it doesn't overlap with
  * an already present segment.
@@ -296,7 +264,7 @@ static void __remove_shared_memory(struct memory_segment *seg)
 	vmem_remove_range(seg->start, seg->size);
 }
 
-int remove_shared_memory(unsigned long start, unsigned long size)
+int vmem_remove_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
 	int ret;
@@ -320,11 +288,9 @@ out:
 	return ret;
 }
 
-int add_shared_memory(unsigned long start, unsigned long size)
+int vmem_add_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
-	struct page *page;
-	unsigned long pfn, num_pfn, end_pfn;
 	int ret;
 
 	mutex_lock(&vmem_mutex);
@@ -339,24 +305,9 @@ int add_shared_memory(unsigned long start, unsigned long size)
 	if (ret)
 		goto out_free;
 
-	ret = vmem_add_mem(start, size);
+	ret = vmem_add_mem(start, size, 0);
 	if (ret)
 		goto out_remove;
-
-	pfn = PFN_DOWN(start);
-	num_pfn = PFN_DOWN(size);
-	end_pfn = pfn + num_pfn;
-
-	page = pfn_to_page(pfn);
-	memset(page, 0, num_pfn * sizeof(struct page));
-
-	for (; pfn < end_pfn; pfn++) {
-		page = pfn_to_page(pfn);
-		init_page_count(page);
-		reset_page_mapcount(page);
-		SetPageReserved(page);
-		INIT_LIST_HEAD(&page->lru);
-	}
 	goto out;
 
 out_remove:
@@ -375,14 +326,34 @@ out:
  */
 void __init vmem_map_init(void)
 {
+	unsigned long ro_start, ro_end;
+	unsigned long start, end;
 	int i;
 
 	INIT_LIST_HEAD(&init_mm.context.crst_list);
 	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
 	init_mm.context.noexec = 0;
-	NODE_DATA(0)->node_mem_map = VMEM_MAP;
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
-		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
+	ro_end = PFN_ALIGN((unsigned long)&_eshared);
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		start = memory_chunk[i].addr;
+		end = memory_chunk[i].addr + memory_chunk[i].size;
+		if (start >= ro_end || end <= ro_start)
+			vmem_add_mem(start, end - start, 0);
+		else if (start >= ro_start && end <= ro_end)
+			vmem_add_mem(start, end - start, 1);
+		else if (start >= ro_start) {
+			vmem_add_mem(start, ro_end - start, 1);
+			vmem_add_mem(ro_end, end - ro_end, 0);
+		} else if (end < ro_end) {
+			vmem_add_mem(start, ro_start - start, 0);
+			vmem_add_mem(ro_start, end - ro_start, 1);
+		} else {
+			vmem_add_mem(start, ro_start - start, 0);
+			vmem_add_mem(ro_start, ro_end - ro_start, 1);
+			vmem_add_mem(ro_end, end - ro_end, 0);
+		}
+	}
 }
 
 /*
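
The five-way if/else chain in vmem_map_init() is a plain interval-overlap case analysis: the part of each memory chunk that intersects [ro_start, ro_end), the kernel text up to _eshared, is mapped read-only, and everything else read-write. A more compact equivalent using the kernel's min/max macros (hypothetical; not what the patch does) would be:

	/* Hypothetical compact form of the chunk-splitting logic: map
	 * the intersection with the read-only window as ro, the
	 * remainder rw. */
	static void __init vmem_add_split(unsigned long start,
					  unsigned long end,
					  unsigned long ro_start,
					  unsigned long ro_end)
	{
		unsigned long lo = max(start, ro_start);
		unsigned long hi = min(end, ro_end);

		if (lo >= hi) {			/* no overlap */
			vmem_add_mem(start, end - start, 0);
			return;
		}
		if (start < lo)			/* read-write head */
			vmem_add_mem(start, lo - start, 0);
		vmem_add_mem(lo, hi - lo, 1);	/* read-only middle */
		if (hi < end)			/* read-write tail */
			vmem_add_mem(hi, end - hi, 0);
	}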