Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/Makefile    2
-rw-r--r--  arch/s390/mm/extmem.c  138
-rw-r--r--  arch/s390/mm/fault.c    28
-rw-r--r--  arch/s390/mm/init.c    184
-rw-r--r--  arch/s390/mm/ioremap.c  84
-rw-r--r--  arch/s390/mm/vmem.c    382
6 files changed, 499 insertions(+), 319 deletions(-)
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index aa9a42b6e62d..8e09db1edbb9 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,6 +2,6 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o
+obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o vmem.o
 obj-$(CONFIG_CMM) += cmm.o
 
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 226275d5c4f6..775bf19e742b 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -14,12 +14,14 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/bootmem.h>
+#include <linux/ctype.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 #include <asm/ebcdic.h>
 #include <asm/errno.h>
 #include <asm/extmem.h>
 #include <asm/cpcmd.h>
-#include <linux/ctype.h>
+#include <asm/setup.h>
 
 #define DCSS_DEBUG	/* Debug messages on/off */
 
@@ -77,15 +79,11 @@ struct dcss_segment {
 	int segcnt;
 };
 
-static DEFINE_SPINLOCK(dcss_lock);
+static DEFINE_MUTEX(dcss_lock);
 static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list);
 static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC",
 					"EW/EN-MIXED" };
 
-extern struct {
-	unsigned long addr, size, type;
-} memory_chunk[MEMORY_CHUNKS];
-
 /*
  * Create the 8 bytes, ebcdic VM segment name from
  * an ascii name.
@@ -117,7 +115,7 @@ segment_by_name (char *name)
 	struct list_head *l;
 	struct dcss_segment *tmp, *retval = NULL;
 
-	assert_spin_locked(&dcss_lock);
+	BUG_ON(!mutex_is_locked(&dcss_lock));
 	dcss_mkname (name, dcss_name);
 	list_for_each (l, &dcss_list) {
 		tmp = list_entry (l, struct dcss_segment, list);
@@ -241,65 +239,6 @@ query_segment_type (struct dcss_segment *seg)
 }
 
 /*
- * check if the given segment collides with guest storage.
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_storage(struct dcss_segment *seg)
-{
-	int i;
-
-	for (i=0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		if (memory_chunk[i].type != 0)
-			continue;
-		if ((memory_chunk[i].addr >> 20) > (seg->end >> 20))
-			continue;
-		if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20)
-				< (seg->start_addr >> 20))
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment collides with other segments that are currently loaded
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_others (struct dcss_segment *seg)
-{
-	struct list_head *l;
-	struct dcss_segment *tmp;
-
-	assert_spin_locked(&dcss_lock);
-	list_for_each(l, &dcss_list) {
-		tmp = list_entry(l, struct dcss_segment, list);
-		if ((tmp->start_addr >> 20) > (seg->end >> 20))
-			continue;
-		if ((tmp->end >> 20) < (seg->start_addr >> 20))
-			continue;
-		if (seg == tmp)
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment exceeds the kernel mapping range (detected or set via mem=)
- * returns 1 if this is the case, 0 if segment fits into the range
- */
-static inline int
-segment_exceeds_range (struct dcss_segment *seg)
-{
-	int seg_last_pfn = (seg->end) >> PAGE_SHIFT;
-	if (seg_last_pfn > max_pfn)
-		return 1;
-	return 0;
-}
-
-/*
  * get info about a segment
  * possible return values:
  * -ENOSYS  : we are not running on VM
@@ -344,24 +283,26 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	rc = query_segment_type (seg);
 	if (rc < 0)
 		goto out_free;
-	if (segment_exceeds_range(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - exceeds"
-			    " kernel mapping range\n",name);
-		rc = -ERANGE;
+
+	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+
+	switch (rc) {
+	case 0:
+		break;
+	case -ENOSPC:
+		PRINT_WARN("segment_load: not loading segment %s - overlaps "
+			   "storage/segment\n", name);
 		goto out_free;
-	}
-	if (segment_overlaps_storage(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			    " storage\n",name);
-		rc = -ENOSPC;
+	case -ERANGE:
+		PRINT_WARN("segment_load: not loading segment %s - exceeds "
+			   "kernel mapping range\n", name);
 		goto out_free;
-	}
-	if (segment_overlaps_others(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			    " other segments\n",name);
-		rc = -EBUSY;
+	default:
+		PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
+			   name, rc);
 		goto out_free;
 	}
+
 	if (do_nonshared)
 		dcss_command = DCSS_LOADNSR;
 	else
@@ -375,7 +316,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 		rc = dcss_diag_translate_rc (seg->end);
 		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
 			  &seg->start_addr, &seg->end);
-		goto out_free;
+		goto out_shared;
 	}
 	seg->do_nonshared = do_nonshared;
 	atomic_set(&seg->ref_count, 1);
@@ -394,6 +335,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 			(void*)seg->start_addr, (void*)seg->end,
 			segtype_string[seg->vm_segtype]);
 	goto out;
+ out_shared:
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
  out_free:
 	kfree(seg);
  out:
@@ -429,7 +372,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr,
 	if (!MACHINE_IS_VM)
 		return -ENOSYS;
 
-	spin_lock (&dcss_lock);
+	mutex_lock(&dcss_lock);
 	seg = segment_by_name (name);
 	if (seg == NULL)
 		rc = __segment_load (name, do_nonshared, addr, end);
@@ -444,7 +387,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr,
 			rc = -EPERM;
 		}
 	}
-	spin_unlock (&dcss_lock);
+	mutex_unlock(&dcss_lock);
 	return rc;
 }
 
@@ -467,7 +410,7 @@ segment_modify_shared (char *name, int do_nonshared)
 	unsigned long dummy;
 	int dcss_command, rc, diag_cc;
 
-	spin_lock (&dcss_lock);
+	mutex_lock(&dcss_lock);
 	seg = segment_by_name (name);
 	if (seg == NULL) {
 		rc = -EINVAL;
@@ -508,7 +451,7 @@ segment_modify_shared (char *name, int do_nonshared)
 		  &dummy, &dummy);
 	kfree(seg);
  out_unlock:
-	spin_unlock(&dcss_lock);
+	mutex_unlock(&dcss_lock);
 	return rc;
 }
 
@@ -526,21 +469,21 @@ segment_unload(char *name)
 	if (!MACHINE_IS_VM)
 		return;
 
-	spin_lock(&dcss_lock);
+	mutex_lock(&dcss_lock);
 	seg = segment_by_name (name);
 	if (seg == NULL) {
 		PRINT_ERR ("could not find segment %s in segment_unload, "
 			   "please report to linux390@de.ibm.com\n",name);
 		goto out_unlock;
 	}
-	if (atomic_dec_return(&seg->ref_count) == 0) {
-		list_del(&seg->list);
-		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
-			  &dummy, &dummy);
-		kfree(seg);
-	}
+	if (atomic_dec_return(&seg->ref_count) != 0)
+		goto out_unlock;
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	list_del(&seg->list);
+	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
+	kfree(seg);
 out_unlock:
-	spin_unlock(&dcss_lock);
+	mutex_unlock(&dcss_lock);
 }
 
 /*
@@ -559,12 +502,13 @@ segment_save(char *name)
 	if (!MACHINE_IS_VM)
 		return;
 
-	spin_lock(&dcss_lock);
+	mutex_lock(&dcss_lock);
 	seg = segment_by_name (name);
 
 	if (seg == NULL) {
-		PRINT_ERR ("could not find segment %s in segment_save, please report to linux390@de.ibm.com\n",name);
-		return;
+		PRINT_ERR("could not find segment %s in segment_save, please "
+			  "report to linux390@de.ibm.com\n", name);
+		goto out;
 	}
 
 	startpfn = seg->start_addr >> PAGE_SHIFT;
@@ -591,7 +535,7 @@ segment_save(char *name)
 		goto out;
 	}
 out:
-	spin_unlock(&dcss_lock);
+	mutex_unlock(&dcss_lock);
 }
 
 EXPORT_SYMBOL(segment_load);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 1c323bbfda91..cd85e34d8703 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -31,6 +31,7 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/kdebug.h>
+#include <asm/s390_ext.h>
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK 0x7ffff000
@@ -394,6 +395,7 @@ void do_dat_exception(struct pt_regs *regs, unsigned long error_code)
 /*
  * 'pfault' pseudo page faults routines.
  */
+static ext_int_info_t ext_int_pfault;
 static int pfault_disable = 0;
 
 static int __init nopfault(char *str)
@@ -422,7 +424,7 @@ int pfault_init(void)
 		  __PF_RES_FIELD };
 	int rc;
 
-	if (pfault_disable)
+	if (!MACHINE_IS_VM || pfault_disable)
 		return -1;
 	asm volatile(
 		"	diag	%1,%0,0x258\n"
@@ -440,7 +442,7 @@ void pfault_fini(void)
 	pfault_refbk_t refbk =
 		{ 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL };
 
-	if (pfault_disable)
+	if (!MACHINE_IS_VM || pfault_disable)
 		return;
 	__ctl_clear_bit(0,9);
 	asm volatile(
@@ -500,5 +502,25 @@ pfault_interrupt(__u16 error_code)
 		set_tsk_need_resched(tsk);
 	}
 }
-#endif
 
+void __init pfault_irq_init(void)
+{
+	if (!MACHINE_IS_VM)
+		return;
+
+	/*
+	 * Try to get pfault pseudo page faults going.
+	 */
+	if (register_early_external_interrupt(0x2603, pfault_interrupt,
+					      &ext_int_pfault) != 0)
+		panic("Couldn't request external interrupt 0x2603");
+
+	if (pfault_init() == 0)
+		return;
+
+	/* Tough luck, no pfault. */
+	pfault_disable = 1;
+	unregister_early_external_interrupt(0x2603, pfault_interrupt,
+					    &ext_int_pfault);
+}
+#endif
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index e1881c31b1cb..4bb21be3b007 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -24,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
+#include <linux/poison.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -69,6 +70,8 @@ void show_mem(void)
 	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	i = max_mapnr;
 	while (i-- > 0) {
+		if (!pfn_valid(i))
+			continue;
 		page = pfn_to_page(i);
 		total++;
 		if (PageReserved(page))
@@ -84,150 +87,52 @@ void show_mem(void)
 	printk("%d pages swap cached\n",cached);
 }
 
-extern unsigned long __initdata zholes_size[];
-/*
- * paging_init() sets up the page tables
- */
-
-#ifndef CONFIG_64BIT
-void __init paging_init(void)
-{
-	pgd_t * pg_dir;
-	pte_t * pg_table;
-	pte_t   pte;
-	int     i;
-	unsigned long tmp;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-	static const int ssm_mask = 0x04000000L;
-	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long zones_size[MAX_NR_ZONES];
-
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
-
-	memset(zones_size, 0, sizeof(zones_size));
-	zones_size[ZONE_DMA] = max_low_pfn;
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-			    zholes_size);
-
-	/* unmap whole virtual address space */
-
-	pg_dir = swapper_pg_dir;
-
-	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear((pmd_t *) pg_dir++);
-
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	while (pfn < max_low_pfn) {
-		/*
-		 * pg_table is physical at this point
-		 */
-		pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-
-		pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
-		pg_dir++;
-
-		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-			else
-				pte = pfn_pte(pfn, PAGE_KERNEL);
-			if (pfn >= max_low_pfn)
-				pte_val(pte) = _PAGE_TYPE_EMPTY;
-			set_pte(pg_table, pte);
-			pfn++;
-		}
-	}
-
-	S390_lowcore.kernel_asce = pgdir_k;
-
-	/* enable virtual mapping in kernel mode */
-	__ctl_load(pgdir_k, 1, 1);
-	__ctl_load(pgdir_k, 7, 7);
-	__ctl_load(pgdir_k, 13, 13);
-	__raw_local_irq_ssm(ssm_mask);
-
-	local_flush_tlb();
-}
-
-#else /* CONFIG_64BIT */
-
-void __init paging_init(void)
-{
-	pgd_t * pg_dir;
-	pmd_t * pm_dir;
-	pte_t * pt_dir;
-	pte_t   pte;
-	int     i,j,k;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
-		_KERN_REGION_TABLE;
-	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
-	unsigned long ro_start_pfn, ro_end_pfn;
-
-	memset(zones_size, 0, sizeof(zones_size));
-	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-	high_pfn = max_low_pfn;
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
-
-	if (dma_pfn > high_pfn)
-		zones_size[ZONE_DMA] = high_pfn;
-	else {
-		zones_size[ZONE_DMA] = dma_pfn;
-		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-	}
-
-	/* Initialize mem_map[]. */
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
-
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
-
-		if (pfn >= max_low_pfn) {
-			pgd_clear(pg_dir);
-			continue;
-		}
-
-		pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
-		pgd_populate(&init_mm, pg_dir, pm_dir);
-
-		for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
-			if (pfn >= max_low_pfn) {
-				pmd_clear(pm_dir);
-				continue;
-			}
-
-			pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
-
-			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-				else
-					pte = pfn_pte(pfn, PAGE_KERNEL);
-				if (pfn >= max_low_pfn)
-					pte_val(pte) = _PAGE_TYPE_EMPTY;
-				set_pte(pt_dir, pte);
-				pfn++;
-			}
-		}
-	}
-
+static void __init setup_ro_region(void)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t new_pte;
+	unsigned long address, end;
+
+	address = ((unsigned long)&__start_rodata) & PAGE_MASK;
+	end = PFN_ALIGN((unsigned long)&__end_rodata);
+
+	for (; address < end; address += PAGE_SIZE) {
+		pgd = pgd_offset_k(address);
+		pmd = pmd_offset(pgd, address);
+		pte = pte_offset_kernel(pmd, address);
+		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
+		set_pte(pte, new_pte);
+	}
+}
+
+extern void vmem_map_init(void);
+
+/*
+ * paging_init() sets up the page tables
+ */
+void __init paging_init(void)
+{
+	pgd_t *pg_dir;
+	int i;
+	unsigned long pgdir_k;
+	static const int ssm_mask = 0x04000000L;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+	pg_dir = swapper_pg_dir;
+
+#ifdef CONFIG_64BIT
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pgd_clear(pg_dir + i);
+#else
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pmd_clear((pmd_t *)(pg_dir + i));
+#endif
+	vmem_map_init();
+	setup_ro_region();
 
 	S390_lowcore.kernel_asce = pgdir_k;
 
@@ -237,9 +142,11 @@ void __init paging_init(void)
 	__ctl_load(pgdir_k, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
-	local_flush_tlb();
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 }
-#endif /* CONFIG_64BIT */
 
 void __init mem_init(void)
 {
@@ -269,6 +176,8 @@ void __init mem_init(void)
 	printk("Write protected kernel read-only data: %#lx - %#lx\n",
 	       (unsigned long)&__start_rodata,
 	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
+	printk("Virtual memmap size: %ldk\n",
+	       (max_pfn * sizeof(struct page)) >> 10);
 }
 
 void free_initmem(void)
@@ -279,6 +188,7 @@ void free_initmem(void)
 	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
diff --git a/arch/s390/mm/ioremap.c b/arch/s390/mm/ioremap.c
index 0f6e9ecbefe2..3d2100a4e209 100644
--- a/arch/s390/mm/ioremap.c
+++ b/arch/s390/mm/ioremap.c
@@ -15,87 +15,8 @@
 
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-	unsigned long pfn;
-
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-	if (address >= end)
-		BUG();
-	pfn = phys_addr >> PAGE_SHIFT;
-	do {
-		if (!pte_none(*pte)) {
-			printk("remap_area_pte: page already exists\n");
-			BUG();
-		}
-		set_pte(pte, pfn_pte(pfn, __pgprot(flags)));
-		address += PAGE_SIZE;
-		pfn++;
-		pte++;
-	} while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
-	unsigned long phys_addr, unsigned long flags)
-{
-	unsigned long end;
-
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-	phys_addr -= address;
-	if (address >= end)
-		BUG();
-	do {
-		pte_t * pte = pte_alloc_kernel(pmd, address);
-		if (!pte)
-			return -ENOMEM;
-		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address && (address < end));
-	return 0;
-}
-
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
-			    unsigned long size, unsigned long flags)
-{
-	int error;
-	pgd_t * dir;
-	unsigned long end = address + size;
-
-	phys_addr -= address;
-	dir = pgd_offset(&init_mm, address);
-	flush_cache_all();
-	if (address >= end)
-		BUG();
-	do {
-		pmd_t *pmd;
-		pmd = pmd_alloc(&init_mm, dir, address);
-		error = -ENOMEM;
-		if (!pmd)
-			break;
-		if (remap_area_pmd(pmd, address, end - address,
-				   phys_addr + address, flags))
-			break;
-		error = 0;
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_all();
-	return 0;
-}
 
 /*
  * Generic mapping function (not visible outside):
@@ -122,7 +43,8 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag
 	if (!area)
 		return NULL;
 	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+			       phys_addr, __pgprot(flags))) {
 		vfree(addr);
 		return NULL;
 	}
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
new file mode 100644
index 000000000000..cd3d93e8c211
--- /dev/null
+++ b/arch/s390/mm/vmem.c
@@ -0,0 +1,382 @@
+/*
+ *  arch/s390/mm/vmem.c
+ *
+ *    Copyright IBM Corp. 2006
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/tlbflush.h>
+
+unsigned long vmalloc_end;
+EXPORT_SYMBOL(vmalloc_end);
+
+static struct page *vmem_map;
+static DEFINE_MUTEX(vmem_mutex);
+
+struct memory_segment {
+	struct list_head list;
+	unsigned long start;
+	unsigned long size;
+};
+
+static LIST_HEAD(mem_segs);
+
+void memmap_init(unsigned long size, int nid, unsigned long zone,
+		 unsigned long start_pfn)
+{
+	struct page *start, *end;
+	struct page *map_start, *map_end;
+	int i;
+
+	start = pfn_to_page(start_pfn);
+	end = start + size;
+
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		unsigned long cstart, cend;
+
+		cstart = PFN_DOWN(memory_chunk[i].addr);
+		cend = cstart + PFN_DOWN(memory_chunk[i].size);
+
+		map_start = mem_map + cstart;
+		map_end = mem_map + cend;
+
+		if (map_start < start)
+			map_start = start;
+		if (map_end > end)
+			map_end = end;
+
+		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
+			/ sizeof(struct page);
+		map_end += ((PFN_ALIGN((unsigned long) map_end)
+			     - (unsigned long) map_end)
+			    / sizeof(struct page));
+
+		if (map_start < map_end)
+			memmap_init_zone((unsigned long)(map_end - map_start),
+					 nid, zone, page_to_pfn(map_start),
+					 MEMMAP_EARLY);
+	}
+}
+
+static inline void *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pmd_t *vmem_pmd_alloc(void)
+{
+	pmd_t *pmd;
+	int i;
+
+	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
+	if (!pmd)
+		return NULL;
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		pmd_clear(pmd + i);
+	return pmd;
+}
+
+static inline pte_t *vmem_pte_alloc(void)
+{
+	pte_t *pte;
+	pte_t empty_pte;
+	int i;
+
+	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
+	if (!pte)
+		return NULL;
+	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(pte + i, empty_pte);
+	return pte;
+}
+
+/*
+ * Add a physical memory range to the 1:1 mapping.
+ */
+static int vmem_add_range(unsigned long start, unsigned long size)
+{
+	unsigned long address;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t  pte;
+	int ret = -ENOMEM;
+
+	for (address = start; address < start + size; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			pm_dir = vmem_pmd_alloc();
+			if (!pm_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir)) {
+			pt_dir = vmem_pte_alloc();
+			if (!pt_dir)
+				goto out;
+			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
+		set_pte(pt_dir, pte);
+	}
+	ret = 0;
+out:
+	flush_tlb_kernel_range(start, start + size);
+	return ret;
+}
+
+/*
+ * Remove a physical memory range from the 1:1 mapping.
+ * Currently only invalidates page table entries.
+ */
+static void vmem_remove_range(unsigned long start, unsigned long size)
+{
+	unsigned long address;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t  pte;
+
+	pte_val(pte) = _PAGE_TYPE_EMPTY;
+	for (address = start; address < start + size; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir))
+			continue;
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir))
+			continue;
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		set_pte(pt_dir, pte);
+	}
+	flush_tlb_kernel_range(start, start + size);
+}
+
+/*
+ * Add a backed mem_map array to the virtual mem_map array.
+ */
+static int vmem_add_mem_map(unsigned long start, unsigned long size)
+{
+	unsigned long address, start_addr, end_addr;
+	struct page *map_start, *map_end;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t  pte;
+	int ret = -ENOMEM;
+
+	map_start = vmem_map + PFN_DOWN(start);
+	map_end = vmem_map + PFN_DOWN(start + size);
+
+	start_addr = (unsigned long) map_start & PAGE_MASK;
+	end_addr = PFN_ALIGN((unsigned long) map_end);
+
+	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			pm_dir = vmem_pmd_alloc();
+			if (!pm_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir)) {
+			pt_dir = vmem_pte_alloc();
+			if (!pt_dir)
+				goto out;
+			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		if (pte_none(*pt_dir)) {
+			unsigned long new_page;
+
+			new_page =__pa(vmem_alloc_pages(0));
+			if (!new_page)
+				goto out;
+			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
+			set_pte(pt_dir, pte);
+		}
+	}
+	ret = 0;
+out:
+	flush_tlb_kernel_range(start_addr, end_addr);
+	return ret;
+}
+
+static int vmem_add_mem(unsigned long start, unsigned long size)
+{
+	int ret;
+
+	ret = vmem_add_range(start, size);
+	if (ret)
+		return ret;
+	return vmem_add_mem_map(start, size);
+}
+
+/*
+ * Add memory segment to the segment list if it doesn't overlap with
+ * an already present segment.
+ */
+static int insert_memory_segment(struct memory_segment *seg)
+{
+	struct memory_segment *tmp;
+
+	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+	    seg->start + seg->size < seg->start)
+		return -ERANGE;
+
+	list_for_each_entry(tmp, &mem_segs, list) {
+		if (seg->start >= tmp->start + tmp->size)
+			continue;
+		if (seg->start + seg->size <= tmp->start)
+			continue;
+		return -ENOSPC;
+	}
+	list_add(&seg->list, &mem_segs);
+	return 0;
+}
+
+/*
+ * Remove memory segment from the segment list.
+ */
+static void remove_memory_segment(struct memory_segment *seg)
+{
+	list_del(&seg->list);
+}
+
+static void __remove_shared_memory(struct memory_segment *seg)
+{
+	remove_memory_segment(seg);
+	vmem_remove_range(seg->start, seg->size);
+}
+
+int remove_shared_memory(unsigned long start, unsigned long size)
+{
+	struct memory_segment *seg;
+	int ret;
+
+	mutex_lock(&vmem_mutex);
+
+	ret = -ENOENT;
+	list_for_each_entry(seg, &mem_segs, list) {
+		if (seg->start == start && seg->size == size)
+			break;
+	}
+
+	if (seg->start != start || seg->size != size)
+		goto out;
+
+	ret = 0;
+	__remove_shared_memory(seg);
+	kfree(seg);
+out:
+	mutex_unlock(&vmem_mutex);
+	return ret;
+}
+
+int add_shared_memory(unsigned long start, unsigned long size)
+{
+	struct memory_segment *seg;
+	struct page *page;
+	unsigned long pfn, num_pfn, end_pfn;
+	int ret;
+
+	mutex_lock(&vmem_mutex);
+	ret = -ENOMEM;
+	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+	if (!seg)
+		goto out;
+	seg->start = start;
+	seg->size = size;
+
+	ret = insert_memory_segment(seg);
+	if (ret)
+		goto out_free;
+
+	ret = vmem_add_mem(start, size);
+	if (ret)
+		goto out_remove;
+
+	pfn = PFN_DOWN(start);
+	num_pfn = PFN_DOWN(size);
+	end_pfn = pfn + num_pfn;
+
+	page = pfn_to_page(pfn);
+	memset(page, 0, num_pfn * sizeof(struct page));
+
+	for (; pfn < end_pfn; pfn++) {
+		page = pfn_to_page(pfn);
+		init_page_count(page);
+		reset_page_mapcount(page);
+		SetPageReserved(page);
+		INIT_LIST_HEAD(&page->lru);
+	}
+	goto out;
+
+out_remove:
+	__remove_shared_memory(seg);
+out_free:
+	kfree(seg);
+out:
+	mutex_unlock(&vmem_mutex);
+	return ret;
+}
+
+/*
+ * map whole physical memory to virtual memory (identity mapping)
+ */
+void __init vmem_map_init(void)
+{
+	unsigned long map_size;
+	int i;
+
+	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
+	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
+	vmem_map = (struct page *) vmalloc_end;
+	NODE_DATA(0)->node_mem_map = vmem_map;
+
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
+		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+}
+
+/*
+ * Convert memory chunk array to a memory segment list so there is a single
+ * list that contains both r/w memory and shared memory segments.
+ */
+static int __init vmem_convert_memory_chunk(void)
+{
+	struct memory_segment *seg;
+	int i;
+
+	mutex_lock(&vmem_mutex);
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		if (!memory_chunk[i].size)
+			continue;
+		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+		if (!seg)
+			panic("Out of memory...\n");
+		seg->start = memory_chunk[i].addr;
+		seg->size = memory_chunk[i].size;
+		insert_memory_segment(seg);
+	}
+	mutex_unlock(&vmem_mutex);
+	return 0;
+}
+
+core_initcall(vmem_convert_memory_chunk);