author		Linus Torvalds <torvalds@woody.osdl.org>	2006-12-08 14:21:55 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-08 14:21:55 -0500
commit		88032b322a38b37335c8cb2e3473a45c81d280eb (patch)
tree		cd722ab15b18a10f6b1aa50656b8da713ee8b799 /arch
parent		63f3861d2fbf8ccbad1386ac9ac8b822c036ea00 (diff)
parent		028d9b3cc62cb9dd31f1b5929edb3c23612cfccc (diff)
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] Poison init section before freeing it.
  [S390] Use add_active_range() and free_area_init_nodes().
  [S390] Virtual memmap for s390.
  [S390] Update documentation for dynamic subchannel mapping.
  [S390] Use dev->groups for adding/removing the subchannel attribute group.
  [S390] Support for disconnected devices reappearing on another subchannel.
  [S390] subchannel lock conversion.
  [S390] Some preparations for the dynamic subchannel mapping patch.
  [S390] runtime switch for qdio performance statistics
  [S390] New DASD feature for ERP related logging
  [S390] add reset call handler to the ap bus.
  [S390] more workqueue fixes.
  [S390] workqueue fixes.
  [S390] uaccess_pt: add missing down_read() and convert to is_init().
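The memory-management patches in this merge drop the hand-maintained zholes_size[] bookkeeping: the architecture now registers each usable memory chunk as an active range and lets the generic mm code derive the zones. A condensed sketch of the new pattern, pieced together from the setup.c and init.c hunks below (the empty-chunk check and the storage-key loop are omitted here):

	/* register every read/write chunk with the generic active-range code */
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = min(start_chunk + PFN_DOWN(memory_chunk[i].size) - 1, end_pfn);
		add_active_range(0, start_chunk, end_chunk);
	}
	free_bootmem_with_active_regions(0, max_pfn);

	/* later, in paging_init(): let the core mm build the zones */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);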
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/Kconfig		14
-rw-r--r--	arch/s390/defconfig		1
-rw-r--r--	arch/s390/kernel/setup.c	55
-rw-r--r--	arch/s390/lib/uaccess_pt.c	5
-rw-r--r--	arch/s390/mm/Makefile		2
-rw-r--r--	arch/s390/mm/extmem.c		106
-rw-r--r--	arch/s390/mm/init.c		184
-rw-r--r--	arch/s390/mm/vmem.c		381
8 files changed, 477 insertions(+), 271 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 45e47bfb68a9..ff690564edbd 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -241,8 +241,14 @@ config WARN_STACK_SIZE
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
 
+config ARCH_POPULATES_NODE_MAP
+	def_bool y
+
 source "mm/Kconfig"
 
+config HOLES_IN_ZONE
+	def_bool y
+
 comment "I/O subsystem configuration"
 
 config MACHCHK_WARNING
@@ -266,14 +272,6 @@ config QDIO
 
 	  If unsure, say Y.
 
-config QDIO_PERF_STATS
-	bool "Performance statistics in /proc"
-	depends on QDIO
-	help
-	  Say Y here to get performance statistics in /proc/qdio_perf
-
-	  If unsure, say N.
-
 config QDIO_DEBUG
 	bool "Extended debugging information"
 	depends on QDIO
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 7cd51e73e274..a6ec919ba83f 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -134,7 +134,6 @@ CONFIG_RESOURCES_64BIT=y
 #
 CONFIG_MACHCHK_WARNING=y
 CONFIG_QDIO=y
-# CONFIG_QDIO_PERF_STATS is not set
 # CONFIG_QDIO_DEBUG is not set
 
 #
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b928fecdc743..49ef206ec880 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -64,9 +64,8 @@ unsigned int console_devno = -1;
 unsigned int console_irq = -1;
 unsigned long machine_flags = 0;
 
-struct mem_chunk memory_chunk[MEMORY_CHUNKS];
+struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
-unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -354,21 +353,6 @@ void machine_power_off(void)
  */
 void (*pm_power_off)(void) = machine_power_off;
 
-static void __init
-add_memory_hole(unsigned long start, unsigned long end)
-{
-	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-
-	if (end <= dma_pfn)
-		zholes_size[ZONE_DMA] += end - start + 1;
-	else if (start > dma_pfn)
-		zholes_size[ZONE_NORMAL] += end - start + 1;
-	else {
-		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
-		zholes_size[ZONE_NORMAL] += end - dma_pfn;
-	}
-}
-
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
@@ -521,7 +505,6 @@ setup_memory(void)
 {
 	unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn, init_pfn;
-	unsigned long last_rw_end;
 	int i;
 
 	/*
@@ -577,39 +560,27 @@ setup_memory(void)
 	/*
 	 * Register RAM areas with the bootmem allocator.
 	 */
-	last_rw_end = start_pfn;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk;
+		unsigned long start_chunk, end_chunk, pfn;
 
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
-		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
-		start_chunk >>= PAGE_SHIFT;
-		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
-		end_chunk >>= PAGE_SHIFT;
-		if (start_chunk < start_pfn)
-			start_chunk = start_pfn;
-		if (end_chunk > end_pfn)
-			end_chunk = end_pfn;
-		if (start_chunk < end_chunk) {
-			/* Initialize storage key for RAM pages */
-			for (init_pfn = start_chunk ; init_pfn < end_chunk;
-			     init_pfn++)
-				page_set_storage_key(init_pfn << PAGE_SHIFT,
-						     PAGE_DEFAULT_KEY);
-			free_bootmem(start_chunk << PAGE_SHIFT,
-				     (end_chunk - start_chunk) << PAGE_SHIFT);
-			if (last_rw_end < start_chunk)
-				add_memory_hole(last_rw_end, start_chunk - 1);
-			last_rw_end = end_chunk;
-		}
+		start_chunk = PFN_DOWN(memory_chunk[i].addr);
+		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+		end_chunk = min(end_chunk, end_pfn);
+		if (start_chunk >= end_chunk)
+			continue;
+		add_active_range(0, start_chunk, end_chunk);
+		pfn = max(start_chunk, start_pfn);
+		for (; pfn <= end_chunk; pfn++)
+			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	if (last_rw_end < end_pfn - 1)
-		add_memory_hole(last_rw_end, end_pfn - 1);
+	free_bootmem_with_active_regions(0, max_pfn);
+	reserve_bootmem(0, PFN_PHYS(start_pfn));
 
 	/*
 	 * Reserve the bootmem bitmap itself as well. We do this in two
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 8741bdc09299..633249c3ba91 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -8,8 +8,8 @@
  */
 
 #include <linux/errno.h>
-#include <asm/uaccess.h>
 #include <linux/mm.h>
+#include <asm/uaccess.h>
 #include <asm/futex.h>
 
 static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
@@ -60,8 +60,9 @@ out:
 
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (current->pid == 1) {
+	if (is_init(current)) {
 		yield();
+		down_read(&mm->mmap_sem);
 		goto survive;
 	}
 	printk("VM: killing process %s\n", current->comm);
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index aa9a42b6e62d..8e09db1edbb9 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,6 +2,6 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o
+obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o vmem.o
 obj-$(CONFIG_CMM) += cmm.o
 
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 9e9bc48463a5..775bf19e742b 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -16,6 +16,7 @@
 #include <linux/bootmem.h>
 #include <linux/ctype.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 #include <asm/ebcdic.h>
 #include <asm/errno.h>
 #include <asm/extmem.h>
@@ -238,65 +239,6 @@ query_segment_type (struct dcss_segment *seg)
 }
 
 /*
- * check if the given segment collides with guest storage.
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_storage(struct dcss_segment *seg)
-{
-	int i;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		if (memory_chunk[i].type != CHUNK_READ_WRITE)
-			continue;
-		if ((memory_chunk[i].addr >> 20) > (seg->end >> 20))
-			continue;
-		if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20)
-		    < (seg->start_addr >> 20))
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment collides with other segments that are currently loaded
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_others (struct dcss_segment *seg)
-{
-	struct list_head *l;
-	struct dcss_segment *tmp;
-
-	BUG_ON(!mutex_is_locked(&dcss_lock));
-	list_for_each(l, &dcss_list) {
-		tmp = list_entry(l, struct dcss_segment, list);
-		if ((tmp->start_addr >> 20) > (seg->end >> 20))
-			continue;
-		if ((tmp->end >> 20) < (seg->start_addr >> 20))
-			continue;
-		if (seg == tmp)
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment exceeds the kernel mapping range (detected or set via mem=)
- * returns 1 if this is the case, 0 if segment fits into the range
- */
-static inline int
-segment_exceeds_range (struct dcss_segment *seg)
-{
-	int seg_last_pfn = (seg->end) >> PAGE_SHIFT;
-	if (seg_last_pfn > max_pfn)
-		return 1;
-	return 0;
-}
-
-/*
  * get info about a segment
  * possible return values:
  * -ENOSYS  : we are not running on VM
@@ -341,24 +283,26 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	rc = query_segment_type (seg);
 	if (rc < 0)
 		goto out_free;
-	if (segment_exceeds_range(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - exceeds"
-			" kernel mapping range\n",name);
-		rc = -ERANGE;
+
+	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+
+	switch (rc) {
+	case 0:
+		break;
+	case -ENOSPC:
+		PRINT_WARN("segment_load: not loading segment %s - overlaps "
+			   "storage/segment\n", name);
 		goto out_free;
-	}
-	if (segment_overlaps_storage(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			" storage\n",name);
-		rc = -ENOSPC;
+	case -ERANGE:
+		PRINT_WARN("segment_load: not loading segment %s - exceeds "
+			   "kernel mapping range\n", name);
 		goto out_free;
-	}
-	if (segment_overlaps_others(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			" other segments\n",name);
-		rc = -EBUSY;
+	default:
+		PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
+			   name, rc);
 		goto out_free;
 	}
+
 	if (do_nonshared)
 		dcss_command = DCSS_LOADNSR;
 	else
@@ -372,7 +316,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 		rc = dcss_diag_translate_rc (seg->end);
 		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
 				&seg->start_addr, &seg->end);
-		goto out_free;
+		goto out_shared;
 	}
 	seg->do_nonshared = do_nonshared;
 	atomic_set(&seg->ref_count, 1);
@@ -391,6 +335,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 			(void*)seg->start_addr, (void*)seg->end,
 			segtype_string[seg->vm_segtype]);
 	goto out;
+ out_shared:
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
  out_free:
 	kfree(seg);
  out:
@@ -530,12 +476,12 @@ segment_unload(char *name)
 			"please report to linux390@de.ibm.com\n",name);
 		goto out_unlock;
 	}
-	if (atomic_dec_return(&seg->ref_count) == 0) {
-		list_del(&seg->list);
-		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
-				&dummy, &dummy);
-		kfree(seg);
-	}
+	if (atomic_dec_return(&seg->ref_count) != 0)
+		goto out_unlock;
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	list_del(&seg->list);
+	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
+	kfree(seg);
 out_unlock:
 	mutex_unlock(&dcss_lock);
 }
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index e1881c31b1cb..4bb21be3b007 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -24,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
+#include <linux/poison.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -69,6 +70,8 @@ void show_mem(void)
 	printk("Free swap:     %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	i = max_mapnr;
 	while (i-- > 0) {
+		if (!pfn_valid(i))
+			continue;
 		page = pfn_to_page(i);
 		total++;
 		if (PageReserved(page))
@@ -84,150 +87,52 @@ void show_mem(void)
 	printk("%d pages swap cached\n",cached);
 }
 
-extern unsigned long __initdata zholes_size[];
-/*
- * paging_init() sets up the page tables
- */
-
-#ifndef CONFIG_64BIT
-void __init paging_init(void)
+static void __init setup_ro_region(void)
 {
-	pgd_t * pg_dir;
-	pte_t * pg_table;
-	pte_t pte;
-	int i;
-	unsigned long tmp;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
-	static const int ssm_mask = 0x04000000L;
-	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long zones_size[MAX_NR_ZONES];
-
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
-
-	memset(zones_size, 0, sizeof(zones_size));
-	zones_size[ZONE_DMA] = max_low_pfn;
-	free_area_init_node(0, &contig_page_data, zones_size,
-			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
-			    zholes_size);
-
-	/* unmap whole virtual address space */
-
-	pg_dir = swapper_pg_dir;
-
-	for (i = 0; i < PTRS_PER_PGD; i++)
-		pmd_clear((pmd_t *) pg_dir++);
-
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	while (pfn < max_low_pfn) {
-		/*
-		 * pg_table is physical at this point
-		 */
-		pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-
-		pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
-		pg_dir++;
-
-		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-			else
-				pte = pfn_pte(pfn, PAGE_KERNEL);
-			if (pfn >= max_low_pfn)
-				pte_val(pte) = _PAGE_TYPE_EMPTY;
-			set_pte(pg_table, pte);
-			pfn++;
-		}
-	}
-
-	S390_lowcore.kernel_asce = pgdir_k;
-
-	/* enable virtual mapping in kernel mode */
-	__ctl_load(pgdir_k, 1, 1);
-	__ctl_load(pgdir_k, 7, 7);
-	__ctl_load(pgdir_k, 13, 13);
-	__raw_local_irq_ssm(ssm_mask);
-
-	local_flush_tlb();
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t new_pte;
+	unsigned long address, end;
+
+	address = ((unsigned long)&__start_rodata) & PAGE_MASK;
+	end = PFN_ALIGN((unsigned long)&__end_rodata);
+
+	for (; address < end; address += PAGE_SIZE) {
+		pgd = pgd_offset_k(address);
+		pmd = pmd_offset(pgd, address);
+		pte = pte_offset_kernel(pmd, address);
+		new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
+		set_pte(pte, new_pte);
+	}
 }
 
-#else /* CONFIG_64BIT */
+extern void vmem_map_init(void);
 
+/*
+ * paging_init() sets up the page tables
+ */
 void __init paging_init(void)
 {
-	pgd_t * pg_dir;
-	pmd_t * pm_dir;
-	pte_t * pt_dir;
-	pte_t pte;
-	int i,j,k;
-	unsigned long pfn = 0;
-	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
-		_KERN_REGION_TABLE;
+	pgd_t *pg_dir;
+	int i;
+	unsigned long pgdir_k;
 	static const int ssm_mask = 0x04000000L;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long dma_pfn, high_pfn;
-	unsigned long ro_start_pfn, ro_end_pfn;
-
-	memset(zones_size, 0, sizeof(zones_size));
-	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-	high_pfn = max_low_pfn;
-	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
-	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
-
-	if (dma_pfn > high_pfn)
-		zones_size[ZONE_DMA] = high_pfn;
-	else {
-		zones_size[ZONE_DMA] = dma_pfn;
-		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
-	}
-
-	/* Initialize mem_map[]. */
-	free_area_init_node(0, &contig_page_data, zones_size,
-			__pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-	/*
-	 * map whole physical memory to virtual memory (identity mapping)
-	 */
-
-	pg_dir = swapper_pg_dir;
-
-	for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
-
-		if (pfn >= max_low_pfn) {
-			pgd_clear(pg_dir);
-			continue;
-		}
-
-		pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4);
-		pgd_populate(&init_mm, pg_dir, pm_dir);
-
-		for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
-			if (pfn >= max_low_pfn) {
-				pmd_clear(pm_dir);
-				continue;
-			}
-
-			pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
-
-			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
-					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
-				else
-					pte = pfn_pte(pfn, PAGE_KERNEL);
-				if (pfn >= max_low_pfn)
-					pte_val(pte) = _PAGE_TYPE_EMPTY;
-				set_pte(pt_dir, pte);
-				pfn++;
-			}
-		}
-	}
+	pg_dir = swapper_pg_dir;
 
+#ifdef CONFIG_64BIT
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pgd_clear(pg_dir + i);
+#else
+	pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		pmd_clear((pmd_t *)(pg_dir + i));
+#endif
+	vmem_map_init();
+	setup_ro_region();
 
 	S390_lowcore.kernel_asce = pgdir_k;
 
@@ -237,9 +142,11 @@ void __init paging_init(void)
 	__ctl_load(pgdir_k, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
-	local_flush_tlb();
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 }
-#endif /* CONFIG_64BIT */
 
 void __init mem_init(void)
 {
@@ -269,6 +176,8 @@ void __init mem_init(void)
 	printk("Write protected kernel read-only data: %#lx - %#lx\n",
 	       (unsigned long)&__start_rodata,
 	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
+	printk("Virtual memmap size: %ldk\n",
+	       (max_pfn * sizeof(struct page)) >> 10);
 }
 
 void free_initmem(void)
@@ -279,6 +188,7 @@ void free_initmem(void)
 	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
new file mode 100644
index 000000000000..7f2944d3ec2a
--- /dev/null
+++ b/arch/s390/mm/vmem.c
@@ -0,0 +1,381 @@
+/*
+ * arch/s390/mm/vmem.c
+ *
+ * Copyright IBM Corp. 2006
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/tlbflush.h>
+
+unsigned long vmalloc_end;
+EXPORT_SYMBOL(vmalloc_end);
+
+static struct page *vmem_map;
+static DEFINE_MUTEX(vmem_mutex);
+
+struct memory_segment {
+	struct list_head list;
+	unsigned long start;
+	unsigned long size;
+};
+
+static LIST_HEAD(mem_segs);
+
+void memmap_init(unsigned long size, int nid, unsigned long zone,
+		 unsigned long start_pfn)
+{
+	struct page *start, *end;
+	struct page *map_start, *map_end;
+	int i;
+
+	start = pfn_to_page(start_pfn);
+	end = start + size;
+
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		unsigned long cstart, cend;
+
+		cstart = PFN_DOWN(memory_chunk[i].addr);
+		cend = cstart + PFN_DOWN(memory_chunk[i].size);
+
+		map_start = mem_map + cstart;
+		map_end = mem_map + cend;
+
+		if (map_start < start)
+			map_start = start;
+		if (map_end > end)
+			map_end = end;
+
+		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
+			/ sizeof(struct page);
+		map_end += ((PFN_ALIGN((unsigned long) map_end)
+			     - (unsigned long) map_end)
+			    / sizeof(struct page));
+
+		if (map_start < map_end)
+			memmap_init_zone((unsigned long)(map_end - map_start),
+					 nid, zone, page_to_pfn(map_start));
+	}
+}
+
+static inline void *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pmd_t *vmem_pmd_alloc(void)
+{
+	pmd_t *pmd;
+	int i;
+
+	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
+	if (!pmd)
+		return NULL;
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		pmd_clear(pmd + i);
+	return pmd;
+}
+
+static inline pte_t *vmem_pte_alloc(void)
+{
+	pte_t *pte;
+	pte_t empty_pte;
+	int i;
+
+	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
+	if (!pte)
+		return NULL;
+	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(pte + i, empty_pte);
+	return pte;
+}
+
+/*
+ * Add a physical memory range to the 1:1 mapping.
+ */
+static int vmem_add_range(unsigned long start, unsigned long size)
+{
+	unsigned long address;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t pte;
+	int ret = -ENOMEM;
+
+	for (address = start; address < start + size; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			pm_dir = vmem_pmd_alloc();
+			if (!pm_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir)) {
+			pt_dir = vmem_pte_alloc();
+			if (!pt_dir)
+				goto out;
+			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
+		set_pte(pt_dir, pte);
+	}
+	ret = 0;
+out:
+	flush_tlb_kernel_range(start, start + size);
+	return ret;
+}
+
+/*
+ * Remove a physical memory range from the 1:1 mapping.
+ * Currently only invalidates page table entries.
+ */
+static void vmem_remove_range(unsigned long start, unsigned long size)
+{
+	unsigned long address;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t pte;
+
+	pte_val(pte) = _PAGE_TYPE_EMPTY;
+	for (address = start; address < start + size; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir))
+			continue;
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir))
+			continue;
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		set_pte(pt_dir, pte);
+	}
+	flush_tlb_kernel_range(start, start + size);
+}
+
+/*
+ * Add a backed mem_map array to the virtual mem_map array.
+ */
+static int vmem_add_mem_map(unsigned long start, unsigned long size)
+{
+	unsigned long address, start_addr, end_addr;
+	struct page *map_start, *map_end;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t pte;
+	int ret = -ENOMEM;
+
+	map_start = vmem_map + PFN_DOWN(start);
+	map_end = vmem_map + PFN_DOWN(start + size);
+
+	start_addr = (unsigned long) map_start & PAGE_MASK;
+	end_addr = PFN_ALIGN((unsigned long) map_end);
+
+	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			pm_dir = vmem_pmd_alloc();
+			if (!pm_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir)) {
+			pt_dir = vmem_pte_alloc();
+			if (!pt_dir)
+				goto out;
+			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		if (pte_none(*pt_dir)) {
+			unsigned long new_page;
+
+			new_page =__pa(vmem_alloc_pages(0));
+			if (!new_page)
+				goto out;
+			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
+			set_pte(pt_dir, pte);
+		}
+	}
+	ret = 0;
+out:
+	flush_tlb_kernel_range(start_addr, end_addr);
+	return ret;
+}
+
+static int vmem_add_mem(unsigned long start, unsigned long size)
+{
+	int ret;
+
+	ret = vmem_add_range(start, size);
+	if (ret)
+		return ret;
+	return vmem_add_mem_map(start, size);
+}
+
+/*
+ * Add memory segment to the segment list if it doesn't overlap with
+ * an already present segment.
+ */
+static int insert_memory_segment(struct memory_segment *seg)
+{
+	struct memory_segment *tmp;
+
+	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+	    seg->start + seg->size < seg->start)
+		return -ERANGE;
+
+	list_for_each_entry(tmp, &mem_segs, list) {
+		if (seg->start >= tmp->start + tmp->size)
+			continue;
+		if (seg->start + seg->size <= tmp->start)
+			continue;
+		return -ENOSPC;
+	}
+	list_add(&seg->list, &mem_segs);
+	return 0;
+}
+
+/*
+ * Remove memory segment from the segment list.
+ */
+static void remove_memory_segment(struct memory_segment *seg)
+{
+	list_del(&seg->list);
+}
+
+static void __remove_shared_memory(struct memory_segment *seg)
+{
+	remove_memory_segment(seg);
+	vmem_remove_range(seg->start, seg->size);
+}
+
+int remove_shared_memory(unsigned long start, unsigned long size)
+{
+	struct memory_segment *seg;
+	int ret;
+
+	mutex_lock(&vmem_mutex);
+
+	ret = -ENOENT;
+	list_for_each_entry(seg, &mem_segs, list) {
+		if (seg->start == start && seg->size == size)
+			break;
+	}
+
+	if (seg->start != start || seg->size != size)
+		goto out;
+
+	ret = 0;
+	__remove_shared_memory(seg);
+	kfree(seg);
+out:
+	mutex_unlock(&vmem_mutex);
+	return ret;
+}
+
+int add_shared_memory(unsigned long start, unsigned long size)
+{
+	struct memory_segment *seg;
+	struct page *page;
+	unsigned long pfn, num_pfn, end_pfn;
+	int ret;
+
+	mutex_lock(&vmem_mutex);
+	ret = -ENOMEM;
+	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+	if (!seg)
+		goto out;
+	seg->start = start;
+	seg->size = size;
+
+	ret = insert_memory_segment(seg);
+	if (ret)
+		goto out_free;
+
+	ret = vmem_add_mem(start, size);
+	if (ret)
+		goto out_remove;
+
+	pfn = PFN_DOWN(start);
+	num_pfn = PFN_DOWN(size);
+	end_pfn = pfn + num_pfn;
+
+	page = pfn_to_page(pfn);
+	memset(page, 0, num_pfn * sizeof(struct page));
+
+	for (; pfn < end_pfn; pfn++) {
+		page = pfn_to_page(pfn);
+		init_page_count(page);
+		reset_page_mapcount(page);
+		SetPageReserved(page);
+		INIT_LIST_HEAD(&page->lru);
+	}
+	goto out;
+
+out_remove:
+	__remove_shared_memory(seg);
+out_free:
+	kfree(seg);
+out:
+	mutex_unlock(&vmem_mutex);
+	return ret;
+}
+
+/*
+ * map whole physical memory to virtual memory (identity mapping)
+ */
+void __init vmem_map_init(void)
+{
+	unsigned long map_size;
+	int i;
+
+	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
+	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
+	vmem_map = (struct page *) vmalloc_end;
+	NODE_DATA(0)->node_mem_map = vmem_map;
+
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
+		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+}
+
+/*
+ * Convert memory chunk array to a memory segment list so there is a single
+ * list that contains both r/w memory and shared memory segments.
+ */
+static int __init vmem_convert_memory_chunk(void)
+{
+	struct memory_segment *seg;
+	int i;
+
+	mutex_lock(&vmem_mutex);
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		if (!memory_chunk[i].size)
+			continue;
+		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+		if (!seg)
+			panic("Out of memory...\n");
+		seg->start = memory_chunk[i].addr;
+		seg->size = memory_chunk[i].size;
+		insert_memory_segment(seg);
+	}
+	mutex_unlock(&vmem_mutex);
+	return 0;
+}
+
+core_initcall(vmem_convert_memory_chunk);
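The new arch/s390/mm/vmem.c exports add_shared_memory() and remove_shared_memory(), which the DCSS code in extmem.c above now uses instead of its own overlap and range checks. As a minimal sketch of how a caller is expected to pair the two (hypothetical helper for illustration; the real call sites are __segment_load() and segment_unload()):

	/* hypothetical caller: map a segment's physical range, use it, tear it down */
	static int use_segment_range(unsigned long start, unsigned long end)
	{
		int rc;

		/* size includes the last byte, matching the extmem.c call sites */
		rc = add_shared_memory(start, end - start + 1);
		if (rc)
			return rc;	/* -ENOSPC: overlap, -ERANGE: beyond max_pfn */

		/* ... access the now identity-mapped range ... */

		remove_shared_memory(start, end - start + 1);
		return 0;
	}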