-rw-r--r--  arch/s390/Kconfig             |  8
-rw-r--r--  arch/s390/mm/extmem.c         |  8
-rw-r--r--  arch/s390/mm/init.c           |  2
-rw-r--r--  arch/s390/mm/vmem.c           | 81
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c | 23
-rw-r--r--  include/asm-s390/page.h       | 20
-rw-r--r--  include/asm-s390/pgtable.h    |  9
-rw-r--r--  include/asm-s390/sparsemem.h  | 18
8 files changed, 53 insertions(+), 116 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8f5f02160ffc..29a7940f284f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -300,6 +300,14 @@ comment "Kernel preemption"
 
 source "kernel/Kconfig.preempt"
 
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	select SPARSEMEM_VMEMMAP_ENABLE
+	select SPARSEMEM_VMEMMAP
+
+config ARCH_SPARSEMEM_DEFAULT
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
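
As an aside on what these options translate into: ARCH_SPARSEMEM_ENABLE and ARCH_SPARSEMEM_DEFAULT make sparsemem the default memory model offered by mm/Kconfig, and the two selects switch that sparsemem implementation to the vmemmap variant. A resulting s390 .config would therefore be expected to contain roughly the following fragment (a sketch of the expected outcome, not taken from the patch itself):

CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_SPARSEMEM=y
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
CONFIG_SPARSEMEM_VMEMMAP=y
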
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index ed2af0a3303b..f231f5ec74b6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -287,7 +287,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	if (rc < 0)
 		goto out_free;
 
-	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 
 	if (rc)
 		goto out_free;
@@ -351,7 +351,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	release_resource(seg->res);
 	kfree(seg->res);
  out_shared:
-	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
  out_free:
 	kfree(seg);
  out:
@@ -474,7 +474,7 @@ segment_modify_shared (char *name, int do_nonshared)
 	rc = 0;
 	goto out_unlock;
  out_del:
-	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 	list_del(&seg->list);
 	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
 	kfree(seg);
@@ -508,7 +508,7 @@ segment_unload(char *name)
 		goto out_unlock;
 	release_resource(seg->res);
 	kfree(seg->res);
-	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 	list_del(&seg->list);
 	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
 	kfree(seg);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index acc92f46a096..fa31de6ae97a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -106,6 +106,8 @@ void __init paging_init(void)
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
+	sparse_init();
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 97bce6c97574..beccacf907f3 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,43 +27,6 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
-			   unsigned long start_pfn)
-{
-	struct page *start, *end;
-	struct page *map_start, *map_end;
-	int i;
-
-	start = pfn_to_page(start_pfn);
-	end = start + size;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long cstart, cend;
-
-		cstart = PFN_DOWN(memory_chunk[i].addr);
-		cend = cstart + PFN_DOWN(memory_chunk[i].size);
-
-		map_start = mem_map + cstart;
-		map_end = mem_map + cend;
-
-		if (map_start < start)
-			map_start = start;
-		if (map_end > end)
-			map_end = end;
-
-		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
-			/ sizeof(struct page);
-		map_end += ((PFN_ALIGN((unsigned long) map_end)
-			     - (unsigned long) map_end)
-			    / sizeof(struct page));
-
-		if (map_start < map_end)
-			memmap_init_zone((unsigned long)(map_end - map_start),
-					 nid, zone, page_to_pfn(map_start),
-					 MEMMAP_EARLY);
-	}
-}
-
 static void __ref *vmem_alloc_pages(unsigned int order)
 {
 	if (slab_is_available())
@@ -115,7 +78,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_range(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
 	unsigned long address;
 	pgd_t *pg_dir;
@@ -209,10 +172,9 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-static int vmem_add_mem_map(unsigned long start, unsigned long size)
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 {
 	unsigned long address, start_addr, end_addr;
-	struct page *map_start, *map_end;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -220,11 +182,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = VMEM_MAP + PFN_DOWN(start);
-	map_end = VMEM_MAP + PFN_DOWN(start + size);
-
-	start_addr = (unsigned long) map_start & PAGE_MASK;
-	end_addr = PFN_ALIGN((unsigned long) map_end);
+	start_addr = (unsigned long) start;
+	end_addr = (unsigned long) (start + nr);
 
 	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
 		pg_dir = pgd_offset_k(address);
@@ -268,16 +227,6 @@ out:
 	return ret;
 }
 
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
-{
-	int ret;
-
-	ret = vmem_add_mem_map(start, size);
-	if (ret)
-		return ret;
-	return vmem_add_range(start, size, ro);
-}
-
 /*
  * Add memory segment to the segment list if it doesn't overlap with
  * an already present segment.
@@ -315,7 +264,7 @@ static void __remove_shared_memory(struct memory_segment *seg)
 	vmem_remove_range(seg->start, seg->size);
 }
 
-int remove_shared_memory(unsigned long start, unsigned long size)
+int vmem_remove_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
 	int ret;
@@ -339,11 +288,9 @@ out:
 	return ret;
 }
 
-int add_shared_memory(unsigned long start, unsigned long size)
+int vmem_add_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
-	struct page *page;
-	unsigned long pfn, num_pfn, end_pfn;
 	int ret;
 
 	mutex_lock(&vmem_mutex);
@@ -361,21 +308,6 @@ int add_shared_memory(unsigned long start, unsigned long size)
 	ret = vmem_add_mem(start, size, 0);
 	if (ret)
 		goto out_remove;
-
-	pfn = PFN_DOWN(start);
-	num_pfn = PFN_DOWN(size);
-	end_pfn = pfn + num_pfn;
-
-	page = pfn_to_page(pfn);
-	memset(page, 0, num_pfn * sizeof(struct page));
-
-	for (; pfn < end_pfn; pfn++) {
-		page = pfn_to_page(pfn);
-		init_page_count(page);
-		reset_page_mapcount(page);
-		SetPageReserved(page);
-		INIT_LIST_HEAD(&page->lru);
-	}
 	goto out;
 
 out_remove:
@@ -401,7 +333,6 @@ void __init vmem_map_init(void)
 	INIT_LIST_HEAD(&init_mm.context.crst_list);
 	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
 	init_mm.context.noexec = 0;
-	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
 	ro_end = PFN_ALIGN((unsigned long)&_eshared);
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
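
For callers, the renamed pair keeps the old calling convention: vmem_add_mapping() inserts a physical range into the kernel 1:1 mapping and returns 0 or a negative errno, and vmem_remove_mapping() tears the range down again, which is exactly how extmem.c above and kvm_virtio.c below use it. A minimal hypothetical user would look roughly like this (illustrative sketch only; example_map_region() is not part of the patch):

/* Hypothetical user of the renamed s390 vmem interface. */
#include <asm/pgtable.h>	/* vmem_add_mapping(), vmem_remove_mapping() */

static int example_map_region(unsigned long phys, unsigned long size)
{
	int rc;

	rc = vmem_add_mapping(phys, size);	/* add range to the kernel 1:1 mapping */
	if (rc)
		return rc;			/* negative errno on failure */

	/* ... the range is now addressable through the kernel mapping ... */

	vmem_remove_mapping(phys, size);	/* undo the mapping when done */
	return 0;
}
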
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index bbef3764fbf8..47a7e6200b26 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -17,6 +17,7 @@
 #include <linux/virtio_config.h>
 #include <linux/interrupt.h>
 #include <linux/virtio_ring.h>
+#include <linux/pfn.h>
 #include <asm/io.h>
 #include <asm/kvm_para.h>
 #include <asm/kvm_virtio.h>
@@ -180,11 +181,10 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
 
 	config = kvm_vq_config(kdev->desc)+index;
 
-	if (add_shared_memory(config->address,
-			      vring_size(config->num, PAGE_SIZE))) {
-		err = -ENOMEM;
+	err = vmem_add_mapping(config->address,
+			       vring_size(config->num, PAGE_SIZE));
+	if (err)
 		goto out;
-	}
 
 	vq = vring_new_virtqueue(config->num, vdev, (void *) config->address,
 				 kvm_notify, callback);
@@ -202,8 +202,8 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
 	vq->priv = config;
 	return vq;
 unmap:
-	remove_shared_memory(config->address, vring_size(config->num,
-			     PAGE_SIZE));
+	vmem_remove_mapping(config->address,
+			    vring_size(config->num, PAGE_SIZE));
 out:
 	return ERR_PTR(err);
 }
@@ -213,8 +213,8 @@ static void kvm_del_vq(struct virtqueue *vq)
 	struct kvm_vqconfig *config = vq->priv;
 
 	vring_del_virtqueue(vq);
-	remove_shared_memory(config->address,
-			     vring_size(config->num, PAGE_SIZE));
+	vmem_remove_mapping(config->address,
+			    vring_size(config->num, PAGE_SIZE));
 }
 
 /*
@@ -318,12 +318,13 @@ static int __init kvm_devices_init(void)
 		return rc;
 	}
 
-	if (add_shared_memory((max_pfn) << PAGE_SHIFT, PAGE_SIZE)) {
+	rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE);
+	if (rc) {
 		device_unregister(&kvm_root);
-		return -ENOMEM;
+		return rc;
 	}
 
-	kvm_devices = (void *) (max_pfn << PAGE_SHIFT);
+	kvm_devices = (void *) PFN_PHYS(max_pfn);
 
 	ctl_set_bit(0, 9);
 	register_external_interrupt(0x2603, kvm_extint_handler);
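
The newly included <linux/pfn.h> is where PFN_PHYS() lives; using it instead of the open-coded max_pfn << PAGE_SHIFT is a readability change only. For reference, the pfn helpers of this era expand to roughly the following (paraphrased from linux/pfn.h; cast details may differ, so treat this as a sketch):

/* Approximate expansions of the <linux/pfn.h> helpers used above. */
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)
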
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index b01e6fc9a295..f0f4579eac13 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -125,26 +125,6 @@ page_get_storage_key(unsigned long addr)
 	return skey;
 }
 
-extern unsigned long max_pfn;
-
-static inline int pfn_valid(unsigned long pfn)
-{
-	unsigned long dummy;
-	int ccode;
-
-	if (pfn >= max_pfn)
-		return 0;
-
-	asm volatile(
-		"	lra	%0,0(%2)\n"
-		"	ipm	%1\n"
-		"	srl	%1,28\n"
-		: "=d" (dummy), "=d" (ccode)
-		: "a" (pfn << PAGE_SHIFT)
-		: "cc");
-	return !ccode;
-}
-
 #endif /* !__ASSEMBLY__ */
 
 /* to align the pointer to the (next) page boundary */
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index fd336f2e2a7a..c7f4f8e3e297 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -129,7 +129,7 @@ extern char empty_zero_page[PAGE_SIZE];
 #define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
 #define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
 #define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
-#define VMEM_MAP	((struct page *) VMALLOC_END)
+#define vmemmap		((struct page *) VMALLOC_END)
 
 /*
  * A 31 bit pagetable entry of S390 has following format:
@@ -1075,8 +1075,8 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 
 #define kern_addr_valid(addr) (1)
 
-extern int add_shared_memory(unsigned long start, unsigned long size);
-extern int remove_shared_memory(unsigned long start, unsigned long size);
+extern int vmem_add_mapping(unsigned long start, unsigned long size);
+extern int vmem_remove_mapping(unsigned long start, unsigned long size);
 extern int s390_enable_sie(void);
 
 /*
@@ -1084,9 +1084,6 @@ extern int s390_enable_sie(void);
  */
 #define pgtable_cache_init() do { } while (0)
 
-#define __HAVE_ARCH_MEMMAP_INIT
-extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
-
 #include <asm-generic/pgtable.h>
 
 #endif /* _S390_PAGE_H */
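
Renaming VMEM_MAP to the generic vmemmap name is what hooks s390 into the common SPARSEMEM_VMEMMAP pfn/page conversion, which is plain pointer arithmetic against the virtually mapped memmap. Paraphrased from asm-generic/memory_model.h (the CONFIG_SPARSEMEM_VMEMMAP case), the conversion reduces to:

/* Paraphrased from asm-generic/memory_model.h, CONFIG_SPARSEMEM_VMEMMAP case. */
#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	((unsigned long)((page) - vmemmap))

With sparsemem providing the memmap backing per section, the arch-private memmap_init()/__HAVE_ARCH_MEMMAP_INIT override removed above is no longer needed.
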
diff --git a/include/asm-s390/sparsemem.h b/include/asm-s390/sparsemem.h
new file mode 100644
index 000000000000..06dfdab6c0e8
--- /dev/null
+++ b/include/asm-s390/sparsemem.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_S390_SPARSEMEM_H
+#define _ASM_S390_SPARSEMEM_H
+
+#define SECTION_SIZE_BITS	25
+
+#ifdef CONFIG_64BIT
+
+#define MAX_PHYSADDR_BITS	42
+#define MAX_PHYSMEM_BITS	42
+
+#else
+
+#define MAX_PHYSADDR_BITS	31
+#define MAX_PHYSMEM_BITS	31
+
+#endif /* CONFIG_64BIT */
+
+#endif /* _ASM_S390_SPARSEMEM_H */
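
These constants imply 32 MB memory sections (2^25 bytes) and a maximum physical address space of 4 TB (2^42 bytes) on 64-bit, or 2 GB (2^31 bytes) on 31-bit, i.e. at most 2^(42-25) = 131072 (respectively 2^(31-25) = 64) sections. A small illustrative check of that arithmetic, mirroring how the generic sparsemem code derives the section count from these bits:

/* Illustrative only: the sizes implied by the constants above (64-bit case). */
#define SECTION_SIZE_BITS	25
#define MAX_PHYSMEM_BITS	42

#define SECTION_SIZE	(1UL << SECTION_SIZE_BITS)				/* 32 MB */
#define NR_MEM_SECTIONS	(1UL << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS))		/* 131072 sections */
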