diff options
Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- | arch/s390/mm/Makefile | 2 | ||||
-rw-r--r-- | arch/s390/mm/extmem.c | 138 | ||||
-rw-r--r-- | arch/s390/mm/fault.c | 28 | ||||
-rw-r--r-- | arch/s390/mm/init.c | 164 | ||||
-rw-r--r-- | arch/s390/mm/ioremap.c | 84 | ||||
-rw-r--r-- | arch/s390/mm/vmem.c | 381 |
6 files changed, 494 insertions, 303 deletions
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile index aa9a42b6e62d..8e09db1edbb9 100644 --- a/arch/s390/mm/Makefile +++ b/arch/s390/mm/Makefile | |||
@@ -2,6 +2,6 @@ | |||
2 | # Makefile for the linux s390-specific parts of the memory manager. | 2 | # Makefile for the linux s390-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o fault.o ioremap.o extmem.o mmap.o | 5 | obj-y := init.o fault.o ioremap.o extmem.o mmap.o vmem.o |
6 | obj-$(CONFIG_CMM) += cmm.o | 6 | obj-$(CONFIG_CMM) += cmm.o |
7 | 7 | ||
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index 226275d5c4f6..775bf19e742b 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c | |||
@@ -14,12 +14,14 @@ | |||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/bootmem.h> | 16 | #include <linux/bootmem.h> |
17 | #include <linux/ctype.h> | ||
17 | #include <asm/page.h> | 18 | #include <asm/page.h> |
19 | #include <asm/pgtable.h> | ||
18 | #include <asm/ebcdic.h> | 20 | #include <asm/ebcdic.h> |
19 | #include <asm/errno.h> | 21 | #include <asm/errno.h> |
20 | #include <asm/extmem.h> | 22 | #include <asm/extmem.h> |
21 | #include <asm/cpcmd.h> | 23 | #include <asm/cpcmd.h> |
22 | #include <linux/ctype.h> | 24 | #include <asm/setup.h> |
23 | 25 | ||
24 | #define DCSS_DEBUG /* Debug messages on/off */ | 26 | #define DCSS_DEBUG /* Debug messages on/off */ |
25 | 27 | ||
@@ -77,15 +79,11 @@ struct dcss_segment { | |||
77 | int segcnt; | 79 | int segcnt; |
78 | }; | 80 | }; |
79 | 81 | ||
80 | static DEFINE_SPINLOCK(dcss_lock); | 82 | static DEFINE_MUTEX(dcss_lock); |
81 | static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list); | 83 | static struct list_head dcss_list = LIST_HEAD_INIT(dcss_list); |
82 | static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", | 84 | static char *segtype_string[] = { "SW", "EW", "SR", "ER", "SN", "EN", "SC", |
83 | "EW/EN-MIXED" }; | 85 | "EW/EN-MIXED" }; |
84 | 86 | ||
85 | extern struct { | ||
86 | unsigned long addr, size, type; | ||
87 | } memory_chunk[MEMORY_CHUNKS]; | ||
88 | |||
89 | /* | 87 | /* |
90 | * Create the 8 bytes, ebcdic VM segment name from | 88 | * Create the 8 bytes, ebcdic VM segment name from |
91 | * an ascii name. | 89 | * an ascii name. |
@@ -117,7 +115,7 @@ segment_by_name (char *name) | |||
117 | struct list_head *l; | 115 | struct list_head *l; |
118 | struct dcss_segment *tmp, *retval = NULL; | 116 | struct dcss_segment *tmp, *retval = NULL; |
119 | 117 | ||
120 | assert_spin_locked(&dcss_lock); | 118 | BUG_ON(!mutex_is_locked(&dcss_lock)); |
121 | dcss_mkname (name, dcss_name); | 119 | dcss_mkname (name, dcss_name); |
122 | list_for_each (l, &dcss_list) { | 120 | list_for_each (l, &dcss_list) { |
123 | tmp = list_entry (l, struct dcss_segment, list); | 121 | tmp = list_entry (l, struct dcss_segment, list); |
@@ -241,65 +239,6 @@ query_segment_type (struct dcss_segment *seg) | |||
241 | } | 239 | } |
242 | 240 | ||
243 | /* | 241 | /* |
244 | * check if the given segment collides with guest storage. | ||
245 | * returns 1 if this is the case, 0 if no collision was found | ||
246 | */ | ||
247 | static int | ||
248 | segment_overlaps_storage(struct dcss_segment *seg) | ||
249 | { | ||
250 | int i; | ||
251 | |||
252 | for (i=0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | ||
253 | if (memory_chunk[i].type != 0) | ||
254 | continue; | ||
255 | if ((memory_chunk[i].addr >> 20) > (seg->end >> 20)) | ||
256 | continue; | ||
257 | if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20) | ||
258 | < (seg->start_addr >> 20)) | ||
259 | continue; | ||
260 | return 1; | ||
261 | } | ||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * check if segment collides with other segments that are currently loaded | ||
267 | * returns 1 if this is the case, 0 if no collision was found | ||
268 | */ | ||
269 | static int | ||
270 | segment_overlaps_others (struct dcss_segment *seg) | ||
271 | { | ||
272 | struct list_head *l; | ||
273 | struct dcss_segment *tmp; | ||
274 | |||
275 | assert_spin_locked(&dcss_lock); | ||
276 | list_for_each(l, &dcss_list) { | ||
277 | tmp = list_entry(l, struct dcss_segment, list); | ||
278 | if ((tmp->start_addr >> 20) > (seg->end >> 20)) | ||
279 | continue; | ||
280 | if ((tmp->end >> 20) < (seg->start_addr >> 20)) | ||
281 | continue; | ||
282 | if (seg == tmp) | ||
283 | continue; | ||
284 | return 1; | ||
285 | } | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * check if segment exceeds the kernel mapping range (detected or set via mem=) | ||
291 | * returns 1 if this is the case, 0 if segment fits into the range | ||
292 | */ | ||
293 | static inline int | ||
294 | segment_exceeds_range (struct dcss_segment *seg) | ||
295 | { | ||
296 | int seg_last_pfn = (seg->end) >> PAGE_SHIFT; | ||
297 | if (seg_last_pfn > max_pfn) | ||
298 | return 1; | ||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | /* | ||
303 | * get info about a segment | 242 | * get info about a segment |
304 | * possible return values: | 243 | * possible return values: |
305 | * -ENOSYS : we are not running on VM | 244 | * -ENOSYS : we are not running on VM |
@@ -344,24 +283,26 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
344 | rc = query_segment_type (seg); | 283 | rc = query_segment_type (seg); |
345 | if (rc < 0) | 284 | if (rc < 0) |
346 | goto out_free; | 285 | goto out_free; |
347 | if (segment_exceeds_range(seg)) { | 286 | |
348 | PRINT_WARN ("segment_load: not loading segment %s - exceeds" | 287 | rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
349 | " kernel mapping range\n",name); | 288 | |
350 | rc = -ERANGE; | 289 | switch (rc) { |
290 | case 0: | ||
291 | break; | ||
292 | case -ENOSPC: | ||
293 | PRINT_WARN("segment_load: not loading segment %s - overlaps " | ||
294 | "storage/segment\n", name); | ||
351 | goto out_free; | 295 | goto out_free; |
352 | } | 296 | case -ERANGE: |
353 | if (segment_overlaps_storage(seg)) { | 297 | PRINT_WARN("segment_load: not loading segment %s - exceeds " |
354 | PRINT_WARN ("segment_load: not loading segment %s - overlaps" | 298 | "kernel mapping range\n", name); |
355 | " storage\n",name); | ||
356 | rc = -ENOSPC; | ||
357 | goto out_free; | 299 | goto out_free; |
358 | } | 300 | default: |
359 | if (segment_overlaps_others(seg)) { | 301 | PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n", |
360 | PRINT_WARN ("segment_load: not loading segment %s - overlaps" | 302 | name, rc); |
361 | " other segments\n",name); | ||
362 | rc = -EBUSY; | ||
363 | goto out_free; | 303 | goto out_free; |
364 | } | 304 | } |
305 | |||
365 | if (do_nonshared) | 306 | if (do_nonshared) |
366 | dcss_command = DCSS_LOADNSR; | 307 | dcss_command = DCSS_LOADNSR; |
367 | else | 308 | else |
@@ -375,7 +316,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
375 | rc = dcss_diag_translate_rc (seg->end); | 316 | rc = dcss_diag_translate_rc (seg->end); |
376 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 317 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, |
377 | &seg->start_addr, &seg->end); | 318 | &seg->start_addr, &seg->end); |
378 | goto out_free; | 319 | goto out_shared; |
379 | } | 320 | } |
380 | seg->do_nonshared = do_nonshared; | 321 | seg->do_nonshared = do_nonshared; |
381 | atomic_set(&seg->ref_count, 1); | 322 | atomic_set(&seg->ref_count, 1); |
@@ -394,6 +335,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long | |||
394 | (void*)seg->start_addr, (void*)seg->end, | 335 | (void*)seg->start_addr, (void*)seg->end, |
395 | segtype_string[seg->vm_segtype]); | 336 | segtype_string[seg->vm_segtype]); |
396 | goto out; | 337 | goto out; |
338 | out_shared: | ||
339 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); | ||
397 | out_free: | 340 | out_free: |
398 | kfree(seg); | 341 | kfree(seg); |
399 | out: | 342 | out: |
@@ -429,7 +372,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr, | |||
429 | if (!MACHINE_IS_VM) | 372 | if (!MACHINE_IS_VM) |
430 | return -ENOSYS; | 373 | return -ENOSYS; |
431 | 374 | ||
432 | spin_lock (&dcss_lock); | 375 | mutex_lock(&dcss_lock); |
433 | seg = segment_by_name (name); | 376 | seg = segment_by_name (name); |
434 | if (seg == NULL) | 377 | if (seg == NULL) |
435 | rc = __segment_load (name, do_nonshared, addr, end); | 378 | rc = __segment_load (name, do_nonshared, addr, end); |
@@ -444,7 +387,7 @@ segment_load (char *name, int do_nonshared, unsigned long *addr, | |||
444 | rc = -EPERM; | 387 | rc = -EPERM; |
445 | } | 388 | } |
446 | } | 389 | } |
447 | spin_unlock (&dcss_lock); | 390 | mutex_unlock(&dcss_lock); |
448 | return rc; | 391 | return rc; |
449 | } | 392 | } |
450 | 393 | ||
@@ -467,7 +410,7 @@ segment_modify_shared (char *name, int do_nonshared) | |||
467 | unsigned long dummy; | 410 | unsigned long dummy; |
468 | int dcss_command, rc, diag_cc; | 411 | int dcss_command, rc, diag_cc; |
469 | 412 | ||
470 | spin_lock (&dcss_lock); | 413 | mutex_lock(&dcss_lock); |
471 | seg = segment_by_name (name); | 414 | seg = segment_by_name (name); |
472 | if (seg == NULL) { | 415 | if (seg == NULL) { |
473 | rc = -EINVAL; | 416 | rc = -EINVAL; |
@@ -508,7 +451,7 @@ segment_modify_shared (char *name, int do_nonshared) | |||
508 | &dummy, &dummy); | 451 | &dummy, &dummy); |
509 | kfree(seg); | 452 | kfree(seg); |
510 | out_unlock: | 453 | out_unlock: |
511 | spin_unlock(&dcss_lock); | 454 | mutex_unlock(&dcss_lock); |
512 | return rc; | 455 | return rc; |
513 | } | 456 | } |
514 | 457 | ||
@@ -526,21 +469,21 @@ segment_unload(char *name) | |||
526 | if (!MACHINE_IS_VM) | 469 | if (!MACHINE_IS_VM) |
527 | return; | 470 | return; |
528 | 471 | ||
529 | spin_lock(&dcss_lock); | 472 | mutex_lock(&dcss_lock); |
530 | seg = segment_by_name (name); | 473 | seg = segment_by_name (name); |
531 | if (seg == NULL) { | 474 | if (seg == NULL) { |
532 | PRINT_ERR ("could not find segment %s in segment_unload, " | 475 | PRINT_ERR ("could not find segment %s in segment_unload, " |
533 | "please report to linux390@de.ibm.com\n",name); | 476 | "please report to linux390@de.ibm.com\n",name); |
534 | goto out_unlock; | 477 | goto out_unlock; |
535 | } | 478 | } |
536 | if (atomic_dec_return(&seg->ref_count) == 0) { | 479 | if (atomic_dec_return(&seg->ref_count) != 0) |
537 | list_del(&seg->list); | 480 | goto out_unlock; |
538 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, | 481 | remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1); |
539 | &dummy, &dummy); | 482 | list_del(&seg->list); |
540 | kfree(seg); | 483 | dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy); |
541 | } | 484 | kfree(seg); |
542 | out_unlock: | 485 | out_unlock: |
543 | spin_unlock(&dcss_lock); | 486 | mutex_unlock(&dcss_lock); |
544 | } | 487 | } |
545 | 488 | ||
546 | /* | 489 | /* |
@@ -559,12 +502,13 @@ segment_save(char *name) | |||
559 | if (!MACHINE_IS_VM) | 502 | if (!MACHINE_IS_VM) |
560 | return; | 503 | return; |
561 | 504 | ||
562 | spin_lock(&dcss_lock); | 505 | mutex_lock(&dcss_lock); |
563 | seg = segment_by_name (name); | 506 | seg = segment_by_name (name); |
564 | 507 | ||
565 | if (seg == NULL) { | 508 | if (seg == NULL) { |
566 | PRINT_ERR ("could not find segment %s in segment_save, please report to linux390@de.ibm.com\n",name); | 509 | PRINT_ERR("could not find segment %s in segment_save, please " |
567 | return; | 510 | "report to linux390@de.ibm.com\n", name); |
511 | goto out; | ||
568 | } | 512 | } |
569 | 513 | ||
570 | startpfn = seg->start_addr >> PAGE_SHIFT; | 514 | startpfn = seg->start_addr >> PAGE_SHIFT; |
@@ -591,7 +535,7 @@ segment_save(char *name) | |||
591 | goto out; | 535 | goto out; |
592 | } | 536 | } |
593 | out: | 537 | out: |
594 | spin_unlock(&dcss_lock); | 538 | mutex_unlock(&dcss_lock); |
595 | } | 539 | } |
596 | 540 | ||
597 | EXPORT_SYMBOL(segment_load); | 541 | EXPORT_SYMBOL(segment_load); |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 1c323bbfda91..cd85e34d8703 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/uaccess.h> | 31 | #include <asm/uaccess.h> |
32 | #include <asm/pgtable.h> | 32 | #include <asm/pgtable.h> |
33 | #include <asm/kdebug.h> | 33 | #include <asm/kdebug.h> |
34 | #include <asm/s390_ext.h> | ||
34 | 35 | ||
35 | #ifndef CONFIG_64BIT | 36 | #ifndef CONFIG_64BIT |
36 | #define __FAIL_ADDR_MASK 0x7ffff000 | 37 | #define __FAIL_ADDR_MASK 0x7ffff000 |
@@ -394,6 +395,7 @@ void do_dat_exception(struct pt_regs *regs, unsigned long error_code) | |||
394 | /* | 395 | /* |
395 | * 'pfault' pseudo page faults routines. | 396 | * 'pfault' pseudo page faults routines. |
396 | */ | 397 | */ |
398 | static ext_int_info_t ext_int_pfault; | ||
397 | static int pfault_disable = 0; | 399 | static int pfault_disable = 0; |
398 | 400 | ||
399 | static int __init nopfault(char *str) | 401 | static int __init nopfault(char *str) |
@@ -422,7 +424,7 @@ int pfault_init(void) | |||
422 | __PF_RES_FIELD }; | 424 | __PF_RES_FIELD }; |
423 | int rc; | 425 | int rc; |
424 | 426 | ||
425 | if (pfault_disable) | 427 | if (!MACHINE_IS_VM || pfault_disable) |
426 | return -1; | 428 | return -1; |
427 | asm volatile( | 429 | asm volatile( |
428 | " diag %1,%0,0x258\n" | 430 | " diag %1,%0,0x258\n" |
@@ -440,7 +442,7 @@ void pfault_fini(void) | |||
440 | pfault_refbk_t refbk = | 442 | pfault_refbk_t refbk = |
441 | { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL }; | 443 | { 0x258, 1, 5, 2, 0ULL, 0ULL, 0ULL, 0ULL }; |
442 | 444 | ||
443 | if (pfault_disable) | 445 | if (!MACHINE_IS_VM || pfault_disable) |
444 | return; | 446 | return; |
445 | __ctl_clear_bit(0,9); | 447 | __ctl_clear_bit(0,9); |
446 | asm volatile( | 448 | asm volatile( |
@@ -500,5 +502,25 @@ pfault_interrupt(__u16 error_code) | |||
500 | set_tsk_need_resched(tsk); | 502 | set_tsk_need_resched(tsk); |
501 | } | 503 | } |
502 | } | 504 | } |
503 | #endif | ||
504 | 505 | ||
506 | void __init pfault_irq_init(void) | ||
507 | { | ||
508 | if (!MACHINE_IS_VM) | ||
509 | return; | ||
510 | |||
511 | /* | ||
512 | * Try to get pfault pseudo page faults going. | ||
513 | */ | ||
514 | if (register_early_external_interrupt(0x2603, pfault_interrupt, | ||
515 | &ext_int_pfault) != 0) | ||
516 | panic("Couldn't request external interrupt 0x2603"); | ||
517 | |||
518 | if (pfault_init() == 0) | ||
519 | return; | ||
520 | |||
521 | /* Tough luck, no pfault. */ | ||
522 | pfault_disable = 1; | ||
523 | unregister_early_external_interrupt(0x2603, pfault_interrupt, | ||
524 | &ext_int_pfault); | ||
525 | } | ||
526 | #endif | ||
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index d99891718709..4bb21be3b007 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/pagemap.h> | 24 | #include <linux/pagemap.h> |
25 | #include <linux/bootmem.h> | 25 | #include <linux/bootmem.h> |
26 | #include <linux/pfn.h> | 26 | #include <linux/pfn.h> |
27 | #include <linux/poison.h> | ||
27 | 28 | ||
28 | #include <asm/processor.h> | 29 | #include <asm/processor.h> |
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
@@ -69,6 +70,8 @@ void show_mem(void) | |||
69 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | 70 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); |
70 | i = max_mapnr; | 71 | i = max_mapnr; |
71 | while (i-- > 0) { | 72 | while (i-- > 0) { |
73 | if (!pfn_valid(i)) | ||
74 | continue; | ||
72 | page = pfn_to_page(i); | 75 | page = pfn_to_page(i); |
73 | total++; | 76 | total++; |
74 | if (PageReserved(page)) | 77 | if (PageReserved(page)) |
@@ -84,65 +87,52 @@ void show_mem(void) | |||
84 | printk("%d pages swap cached\n",cached); | 87 | printk("%d pages swap cached\n",cached); |
85 | } | 88 | } |
86 | 89 | ||
90 | static void __init setup_ro_region(void) | ||
91 | { | ||
92 | pgd_t *pgd; | ||
93 | pmd_t *pmd; | ||
94 | pte_t *pte; | ||
95 | pte_t new_pte; | ||
96 | unsigned long address, end; | ||
97 | |||
98 | address = ((unsigned long)&__start_rodata) & PAGE_MASK; | ||
99 | end = PFN_ALIGN((unsigned long)&__end_rodata); | ||
100 | |||
101 | for (; address < end; address += PAGE_SIZE) { | ||
102 | pgd = pgd_offset_k(address); | ||
103 | pmd = pmd_offset(pgd, address); | ||
104 | pte = pte_offset_kernel(pmd, address); | ||
105 | new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO)); | ||
106 | set_pte(pte, new_pte); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | extern void vmem_map_init(void); | ||
111 | |||
87 | /* | 112 | /* |
88 | * paging_init() sets up the page tables | 113 | * paging_init() sets up the page tables |
89 | */ | 114 | */ |
90 | |||
91 | #ifndef CONFIG_64BIT | ||
92 | void __init paging_init(void) | 115 | void __init paging_init(void) |
93 | { | 116 | { |
94 | pgd_t * pg_dir; | 117 | pgd_t *pg_dir; |
95 | pte_t * pg_table; | 118 | int i; |
96 | pte_t pte; | 119 | unsigned long pgdir_k; |
97 | int i; | 120 | static const int ssm_mask = 0x04000000L; |
98 | unsigned long tmp; | ||
99 | unsigned long pfn = 0; | ||
100 | unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; | ||
101 | static const int ssm_mask = 0x04000000L; | ||
102 | unsigned long ro_start_pfn, ro_end_pfn; | ||
103 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 121 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
104 | 122 | ||
105 | ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata); | 123 | pg_dir = swapper_pg_dir; |
106 | ro_end_pfn = PFN_UP((unsigned long)&__end_rodata); | ||
107 | |||
108 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | ||
109 | max_zone_pfns[ZONE_DMA] = max_low_pfn; | ||
110 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | ||
111 | free_area_init_nodes(max_zone_pfns); | ||
112 | |||
113 | /* unmap whole virtual address space */ | ||
114 | 124 | ||
115 | pg_dir = swapper_pg_dir; | 125 | #ifdef CONFIG_64BIT |
116 | 126 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE; | |
117 | for (i = 0; i < PTRS_PER_PGD; i++) | 127 | for (i = 0; i < PTRS_PER_PGD; i++) |
118 | pmd_clear((pmd_t *) pg_dir++); | 128 | pgd_clear(pg_dir + i); |
119 | 129 | #else | |
120 | /* | 130 | pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; |
121 | * map whole physical memory to virtual memory (identity mapping) | 131 | for (i = 0; i < PTRS_PER_PGD; i++) |
122 | */ | 132 | pmd_clear((pmd_t *)(pg_dir + i)); |
123 | 133 | #endif | |
124 | pg_dir = swapper_pg_dir; | 134 | vmem_map_init(); |
125 | 135 | setup_ro_region(); | |
126 | while (pfn < max_low_pfn) { | ||
127 | /* | ||
128 | * pg_table is physical at this point | ||
129 | */ | ||
130 | pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); | ||
131 | |||
132 | pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table); | ||
133 | pg_dir++; | ||
134 | |||
135 | for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) { | ||
136 | if (pfn >= ro_start_pfn && pfn < ro_end_pfn) | ||
137 | pte = pfn_pte(pfn, __pgprot(_PAGE_RO)); | ||
138 | else | ||
139 | pte = pfn_pte(pfn, PAGE_KERNEL); | ||
140 | if (pfn >= max_low_pfn) | ||
141 | pte_val(pte) = _PAGE_TYPE_EMPTY; | ||
142 | set_pte(pg_table, pte); | ||
143 | pfn++; | ||
144 | } | ||
145 | } | ||
146 | 136 | ||
147 | S390_lowcore.kernel_asce = pgdir_k; | 137 | S390_lowcore.kernel_asce = pgdir_k; |
148 | 138 | ||
@@ -152,82 +142,11 @@ void __init paging_init(void) | |||
152 | __ctl_load(pgdir_k, 13, 13); | 142 | __ctl_load(pgdir_k, 13, 13); |
153 | __raw_local_irq_ssm(ssm_mask); | 143 | __raw_local_irq_ssm(ssm_mask); |
154 | 144 | ||
155 | local_flush_tlb(); | ||
156 | } | ||
157 | |||
158 | #else /* CONFIG_64BIT */ | ||
159 | |||
160 | void __init paging_init(void) | ||
161 | { | ||
162 | pgd_t * pg_dir; | ||
163 | pmd_t * pm_dir; | ||
164 | pte_t * pt_dir; | ||
165 | pte_t pte; | ||
166 | int i,j,k; | ||
167 | unsigned long pfn = 0; | ||
168 | unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | | ||
169 | _KERN_REGION_TABLE; | ||
170 | static const int ssm_mask = 0x04000000L; | ||
171 | unsigned long ro_start_pfn, ro_end_pfn; | ||
172 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
173 | |||
174 | ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata); | ||
175 | ro_end_pfn = PFN_UP((unsigned long)&__end_rodata); | ||
176 | |||
177 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 145 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
178 | max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); | 146 | max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); |
179 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | 147 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
180 | free_area_init_nodes(max_zone_pfns); | 148 | free_area_init_nodes(max_zone_pfns); |
181 | |||
182 | /* | ||
183 | * map whole physical memory to virtual memory (identity mapping) | ||
184 | */ | ||
185 | |||
186 | pg_dir = swapper_pg_dir; | ||
187 | |||
188 | for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) { | ||
189 | |||
190 | if (pfn >= max_low_pfn) { | ||
191 | pgd_clear(pg_dir); | ||
192 | continue; | ||
193 | } | ||
194 | |||
195 | pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4); | ||
196 | pgd_populate(&init_mm, pg_dir, pm_dir); | ||
197 | |||
198 | for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) { | ||
199 | if (pfn >= max_low_pfn) { | ||
200 | pmd_clear(pm_dir); | ||
201 | continue; | ||
202 | } | ||
203 | |||
204 | pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); | ||
205 | pmd_populate_kernel(&init_mm, pm_dir, pt_dir); | ||
206 | |||
207 | for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) { | ||
208 | if (pfn >= ro_start_pfn && pfn < ro_end_pfn) | ||
209 | pte = pfn_pte(pfn, __pgprot(_PAGE_RO)); | ||
210 | else | ||
211 | pte = pfn_pte(pfn, PAGE_KERNEL); | ||
212 | if (pfn >= max_low_pfn) | ||
213 | pte_val(pte) = _PAGE_TYPE_EMPTY; | ||
214 | set_pte(pt_dir, pte); | ||
215 | pfn++; | ||
216 | } | ||
217 | } | ||
218 | } | ||
219 | |||
220 | S390_lowcore.kernel_asce = pgdir_k; | ||
221 | |||
222 | /* enable virtual mapping in kernel mode */ | ||
223 | __ctl_load(pgdir_k, 1, 1); | ||
224 | __ctl_load(pgdir_k, 7, 7); | ||
225 | __ctl_load(pgdir_k, 13, 13); | ||
226 | __raw_local_irq_ssm(ssm_mask); | ||
227 | |||
228 | local_flush_tlb(); | ||
229 | } | 149 | } |
230 | #endif /* CONFIG_64BIT */ | ||
231 | 150 | ||
232 | void __init mem_init(void) | 151 | void __init mem_init(void) |
233 | { | 152 | { |
@@ -257,6 +176,8 @@ void __init mem_init(void) | |||
257 | printk("Write protected kernel read-only data: %#lx - %#lx\n", | 176 | printk("Write protected kernel read-only data: %#lx - %#lx\n", |
258 | (unsigned long)&__start_rodata, | 177 | (unsigned long)&__start_rodata, |
259 | PFN_ALIGN((unsigned long)&__end_rodata) - 1); | 178 | PFN_ALIGN((unsigned long)&__end_rodata) - 1); |
179 | printk("Virtual memmap size: %ldk\n", | ||
180 | (max_pfn * sizeof(struct page)) >> 10); | ||
260 | } | 181 | } |
261 | 182 | ||
262 | void free_initmem(void) | 183 | void free_initmem(void) |
@@ -267,6 +188,7 @@ void free_initmem(void) | |||
267 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | 188 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { |
268 | ClearPageReserved(virt_to_page(addr)); | 189 | ClearPageReserved(virt_to_page(addr)); |
269 | init_page_count(virt_to_page(addr)); | 190 | init_page_count(virt_to_page(addr)); |
191 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | ||
270 | free_page(addr); | 192 | free_page(addr); |
271 | totalram_pages++; | 193 | totalram_pages++; |
272 | } | 194 | } |
diff --git a/arch/s390/mm/ioremap.c b/arch/s390/mm/ioremap.c index 0f6e9ecbefe2..3d2100a4e209 100644 --- a/arch/s390/mm/ioremap.c +++ b/arch/s390/mm/ioremap.c | |||
@@ -15,87 +15,8 @@ | |||
15 | 15 | ||
16 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <asm/io.h> | 18 | #include <linux/io.h> |
19 | #include <asm/pgalloc.h> | 19 | #include <asm/pgalloc.h> |
20 | #include <asm/cacheflush.h> | ||
21 | #include <asm/tlbflush.h> | ||
22 | |||
23 | static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size, | ||
24 | unsigned long phys_addr, unsigned long flags) | ||
25 | { | ||
26 | unsigned long end; | ||
27 | unsigned long pfn; | ||
28 | |||
29 | address &= ~PMD_MASK; | ||
30 | end = address + size; | ||
31 | if (end > PMD_SIZE) | ||
32 | end = PMD_SIZE; | ||
33 | if (address >= end) | ||
34 | BUG(); | ||
35 | pfn = phys_addr >> PAGE_SHIFT; | ||
36 | do { | ||
37 | if (!pte_none(*pte)) { | ||
38 | printk("remap_area_pte: page already exists\n"); | ||
39 | BUG(); | ||
40 | } | ||
41 | set_pte(pte, pfn_pte(pfn, __pgprot(flags))); | ||
42 | address += PAGE_SIZE; | ||
43 | pfn++; | ||
44 | pte++; | ||
45 | } while (address && (address < end)); | ||
46 | } | ||
47 | |||
48 | static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size, | ||
49 | unsigned long phys_addr, unsigned long flags) | ||
50 | { | ||
51 | unsigned long end; | ||
52 | |||
53 | address &= ~PGDIR_MASK; | ||
54 | end = address + size; | ||
55 | if (end > PGDIR_SIZE) | ||
56 | end = PGDIR_SIZE; | ||
57 | phys_addr -= address; | ||
58 | if (address >= end) | ||
59 | BUG(); | ||
60 | do { | ||
61 | pte_t * pte = pte_alloc_kernel(pmd, address); | ||
62 | if (!pte) | ||
63 | return -ENOMEM; | ||
64 | remap_area_pte(pte, address, end - address, address + phys_addr, flags); | ||
65 | address = (address + PMD_SIZE) & PMD_MASK; | ||
66 | pmd++; | ||
67 | } while (address && (address < end)); | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | static int remap_area_pages(unsigned long address, unsigned long phys_addr, | ||
72 | unsigned long size, unsigned long flags) | ||
73 | { | ||
74 | int error; | ||
75 | pgd_t * dir; | ||
76 | unsigned long end = address + size; | ||
77 | |||
78 | phys_addr -= address; | ||
79 | dir = pgd_offset(&init_mm, address); | ||
80 | flush_cache_all(); | ||
81 | if (address >= end) | ||
82 | BUG(); | ||
83 | do { | ||
84 | pmd_t *pmd; | ||
85 | pmd = pmd_alloc(&init_mm, dir, address); | ||
86 | error = -ENOMEM; | ||
87 | if (!pmd) | ||
88 | break; | ||
89 | if (remap_area_pmd(pmd, address, end - address, | ||
90 | phys_addr + address, flags)) | ||
91 | break; | ||
92 | error = 0; | ||
93 | address = (address + PGDIR_SIZE) & PGDIR_MASK; | ||
94 | dir++; | ||
95 | } while (address && (address < end)); | ||
96 | flush_tlb_all(); | ||
97 | return 0; | ||
98 | } | ||
99 | 20 | ||
100 | /* | 21 | /* |
101 | * Generic mapping function (not visible outside): | 22 | * Generic mapping function (not visible outside): |
@@ -122,7 +43,8 @@ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flag | |||
122 | if (!area) | 43 | if (!area) |
123 | return NULL; | 44 | return NULL; |
124 | addr = area->addr; | 45 | addr = area->addr; |
125 | if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) { | 46 | if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, |
47 | phys_addr, __pgprot(flags))) { | ||
126 | vfree(addr); | 48 | vfree(addr); |
127 | return NULL; | 49 | return NULL; |
128 | } | 50 | } |
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c new file mode 100644 index 000000000000..7f2944d3ec2a --- /dev/null +++ b/arch/s390/mm/vmem.c | |||
@@ -0,0 +1,381 @@ | |||
1 | /* | ||
2 | * arch/s390/mm/vmem.c | ||
3 | * | ||
4 | * Copyright IBM Corp. 2006 | ||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/bootmem.h> | ||
9 | #include <linux/pfn.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/list.h> | ||
13 | #include <asm/pgalloc.h> | ||
14 | #include <asm/pgtable.h> | ||
15 | #include <asm/setup.h> | ||
16 | #include <asm/tlbflush.h> | ||
17 | |||
18 | unsigned long vmalloc_end; | ||
19 | EXPORT_SYMBOL(vmalloc_end); | ||
20 | |||
21 | static struct page *vmem_map; | ||
22 | static DEFINE_MUTEX(vmem_mutex); | ||
23 | |||
24 | struct memory_segment {	/* one contiguous physical memory range */ | ||
25 | struct list_head list;	/* link in the global mem_segs list */ | ||
26 | unsigned long start;	/* physical start address of the range */ | ||
27 | unsigned long size;	/* length of the range in bytes */ | ||
28 | }; | ||
29 | |||
30 | static LIST_HEAD(mem_segs); | ||
31 | |||
32 | void memmap_init(unsigned long size, int nid, unsigned long zone, | ||
33 | unsigned long start_pfn) | ||
34 | { | ||
35 | struct page *start, *end; | ||
36 | struct page *map_start, *map_end; | ||
37 | int i; | ||
38 | |||
39 | start = pfn_to_page(start_pfn); | ||
40 | end = start + size; | ||
41 | |||
42 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | ||
43 | unsigned long cstart, cend; | ||
44 | |||
45 | cstart = PFN_DOWN(memory_chunk[i].addr); | ||
46 | cend = cstart + PFN_DOWN(memory_chunk[i].size); | ||
47 | |||
48 | map_start = mem_map + cstart; | ||
49 | map_end = mem_map + cend; | ||
50 | |||
51 | if (map_start < start) | ||
52 | map_start = start; | ||
53 | if (map_end > end) | ||
54 | map_end = end; | ||
55 | |||
56 | map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) | ||
57 | / sizeof(struct page); | ||
58 | map_end += ((PFN_ALIGN((unsigned long) map_end) | ||
59 | - (unsigned long) map_end) | ||
60 | / sizeof(struct page)); | ||
61 | |||
62 | if (map_start < map_end) | ||
63 | memmap_init_zone((unsigned long)(map_end - map_start), | ||
64 | nid, zone, page_to_pfn(map_start)); | ||
65 | } | ||
66 | } | ||
67 | |||
68 | static inline void *vmem_alloc_pages(unsigned int order) | ||
69 | { | ||
70 | if (slab_is_available()) | ||
71 | return (void *)__get_free_pages(GFP_KERNEL, order); | ||
72 | return alloc_bootmem_pages((1 << order) * PAGE_SIZE); | ||
73 | } | ||
74 | |||
75 | static inline pmd_t *vmem_pmd_alloc(void) | ||
76 | { | ||
77 | pmd_t *pmd; | ||
78 | int i; | ||
79 | |||
80 | pmd = vmem_alloc_pages(PMD_ALLOC_ORDER); | ||
81 | if (!pmd) | ||
82 | return NULL; | ||
83 | for (i = 0; i < PTRS_PER_PMD; i++) | ||
84 | pmd_clear(pmd + i); | ||
85 | return pmd; | ||
86 | } | ||
87 | |||
88 | static inline pte_t *vmem_pte_alloc(void) | ||
89 | { | ||
90 | pte_t *pte; | ||
91 | pte_t empty_pte; | ||
92 | int i; | ||
93 | |||
94 | pte = vmem_alloc_pages(PTE_ALLOC_ORDER); | ||
95 | if (!pte) | ||
96 | return NULL; | ||
97 | pte_val(empty_pte) = _PAGE_TYPE_EMPTY; | ||
98 | for (i = 0; i < PTRS_PER_PTE; i++) | ||
99 | set_pte(pte + i, empty_pte); | ||
100 | return pte; | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Add a physical memory range to the 1:1 mapping. | ||
105 | */ | ||
106 | static int vmem_add_range(unsigned long start, unsigned long size) | ||
107 | { | ||
108 | unsigned long address; | ||
109 | pgd_t *pg_dir; | ||
110 | pmd_t *pm_dir; | ||
111 | pte_t *pt_dir; | ||
112 | pte_t pte; | ||
113 | int ret = -ENOMEM; | ||
114 | |||
115 | for (address = start; address < start + size; address += PAGE_SIZE) { | ||
116 | pg_dir = pgd_offset_k(address); | ||
117 | if (pgd_none(*pg_dir)) { | ||
118 | pm_dir = vmem_pmd_alloc(); | ||
119 | if (!pm_dir) | ||
120 | goto out; | ||
121 | pgd_populate(&init_mm, pg_dir, pm_dir); | ||
122 | } | ||
123 | |||
124 | pm_dir = pmd_offset(pg_dir, address); | ||
125 | if (pmd_none(*pm_dir)) { | ||
126 | pt_dir = vmem_pte_alloc(); | ||
127 | if (!pt_dir) | ||
128 | goto out; | ||
129 | pmd_populate_kernel(&init_mm, pm_dir, pt_dir); | ||
130 | } | ||
131 | |||
132 | pt_dir = pte_offset_kernel(pm_dir, address); | ||
133 | pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL); | ||
134 | set_pte(pt_dir, pte); | ||
135 | } | ||
136 | ret = 0; | ||
137 | out: | ||
138 | flush_tlb_kernel_range(start, start + size); | ||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Remove a physical memory range from the 1:1 mapping. | ||
144 | * Currently only invalidates page table entries. | ||
145 | */ | ||
146 | static void vmem_remove_range(unsigned long start, unsigned long size) | ||
147 | { | ||
148 | unsigned long address; | ||
149 | pgd_t *pg_dir; | ||
150 | pmd_t *pm_dir; | ||
151 | pte_t *pt_dir; | ||
152 | pte_t pte; | ||
153 | |||
154 | pte_val(pte) = _PAGE_TYPE_EMPTY; | ||
155 | for (address = start; address < start + size; address += PAGE_SIZE) { | ||
156 | pg_dir = pgd_offset_k(address); | ||
157 | if (pgd_none(*pg_dir)) | ||
158 | continue; | ||
159 | pm_dir = pmd_offset(pg_dir, address); | ||
160 | if (pmd_none(*pm_dir)) | ||
161 | continue; | ||
162 | pt_dir = pte_offset_kernel(pm_dir, address); | ||
163 | set_pte(pt_dir, pte); | ||
164 | } | ||
165 | flush_tlb_kernel_range(start, start + size); | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * Add a backed mem_map array to the virtual mem_map array. | ||
170 | */ | ||
171 | static int vmem_add_mem_map(unsigned long start, unsigned long size) | ||
172 | { | ||
173 | unsigned long address, start_addr, end_addr; | ||
174 | struct page *map_start, *map_end; | ||
175 | pgd_t *pg_dir; | ||
176 | pmd_t *pm_dir; | ||
177 | pte_t *pt_dir; | ||
178 | pte_t pte; | ||
179 | int ret = -ENOMEM; | ||
180 | |||
181 | map_start = vmem_map + PFN_DOWN(start); | ||
182 | map_end = vmem_map + PFN_DOWN(start + size); | ||
183 | |||
184 | start_addr = (unsigned long) map_start & PAGE_MASK; | ||
185 | end_addr = PFN_ALIGN((unsigned long) map_end); | ||
186 | |||
187 | for (address = start_addr; address < end_addr; address += PAGE_SIZE) { | ||
188 | pg_dir = pgd_offset_k(address); | ||
189 | if (pgd_none(*pg_dir)) { | ||
190 | pm_dir = vmem_pmd_alloc(); | ||
191 | if (!pm_dir) | ||
192 | goto out; | ||
193 | pgd_populate(&init_mm, pg_dir, pm_dir); | ||
194 | } | ||
195 | |||
196 | pm_dir = pmd_offset(pg_dir, address); | ||
197 | if (pmd_none(*pm_dir)) { | ||
198 | pt_dir = vmem_pte_alloc(); | ||
199 | if (!pt_dir) | ||
200 | goto out; | ||
201 | pmd_populate_kernel(&init_mm, pm_dir, pt_dir); | ||
202 | } | ||
203 | |||
204 | pt_dir = pte_offset_kernel(pm_dir, address); | ||
205 | if (pte_none(*pt_dir)) { | ||
206 | unsigned long new_page; | ||
207 | |||
208 | new_page =__pa(vmem_alloc_pages(0)); | ||
209 | if (!new_page) | ||
210 | goto out; | ||
211 | pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL); | ||
212 | set_pte(pt_dir, pte); | ||
213 | } | ||
214 | } | ||
215 | ret = 0; | ||
216 | out: | ||
217 | flush_tlb_kernel_range(start_addr, end_addr); | ||
218 | return ret; | ||
219 | } | ||
220 | |||
/* Map a memory range 1:1 and back its slice of the virtual mem_map. */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	int ret = vmem_add_range(start, size);

	return ret ? ret : vmem_add_mem_map(start, size);
}
230 | |||
231 | /* | ||
232 | * Add memory segment to the segment list if it doesn't overlap with | ||
233 | * an already present segment. | ||
234 | */ | ||
235 | static int insert_memory_segment(struct memory_segment *seg) | ||
236 | { | ||
237 | struct memory_segment *tmp; | ||
238 | |||
239 | if (PFN_DOWN(seg->start + seg->size) > max_pfn || | ||
240 | seg->start + seg->size < seg->start) | ||
241 | return -ERANGE; | ||
242 | |||
243 | list_for_each_entry(tmp, &mem_segs, list) { | ||
244 | if (seg->start >= tmp->start + tmp->size) | ||
245 | continue; | ||
246 | if (seg->start + seg->size <= tmp->start) | ||
247 | continue; | ||
248 | return -ENOSPC; | ||
249 | } | ||
250 | list_add(&seg->list, &mem_segs); | ||
251 | return 0; | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * Remove memory segment from the segment list. | ||
256 | */ | ||
257 | static void remove_memory_segment(struct memory_segment *seg) | ||
258 | { | ||
259 | list_del(&seg->list); | ||
260 | } | ||
261 | |||
262 | static void __remove_shared_memory(struct memory_segment *seg) | ||
263 | { | ||
264 | remove_memory_segment(seg); | ||
265 | vmem_remove_range(seg->start, seg->size); | ||
266 | } | ||
267 | |||
268 | int remove_shared_memory(unsigned long start, unsigned long size) | ||
269 | { | ||
270 | struct memory_segment *seg; | ||
271 | int ret; | ||
272 | |||
273 | mutex_lock(&vmem_mutex); | ||
274 | |||
275 | ret = -ENOENT; | ||
276 | list_for_each_entry(seg, &mem_segs, list) { | ||
277 | if (seg->start == start && seg->size == size) | ||
278 | break; | ||
279 | } | ||
280 | |||
281 | if (seg->start != start || seg->size != size) | ||
282 | goto out; | ||
283 | |||
284 | ret = 0; | ||
285 | __remove_shared_memory(seg); | ||
286 | kfree(seg); | ||
287 | out: | ||
288 | mutex_unlock(&vmem_mutex); | ||
289 | return ret; | ||
290 | } | ||
291 | |||
292 | int add_shared_memory(unsigned long start, unsigned long size) | ||
293 | { | ||
294 | struct memory_segment *seg; | ||
295 | struct page *page; | ||
296 | unsigned long pfn, num_pfn, end_pfn; | ||
297 | int ret; | ||
298 | |||
299 | mutex_lock(&vmem_mutex); | ||
300 | ret = -ENOMEM; | ||
301 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); | ||
302 | if (!seg) | ||
303 | goto out; | ||
304 | seg->start = start; | ||
305 | seg->size = size; | ||
306 | |||
307 | ret = insert_memory_segment(seg); | ||
308 | if (ret) | ||
309 | goto out_free; | ||
310 | |||
311 | ret = vmem_add_mem(start, size); | ||
312 | if (ret) | ||
313 | goto out_remove; | ||
314 | |||
315 | pfn = PFN_DOWN(start); | ||
316 | num_pfn = PFN_DOWN(size); | ||
317 | end_pfn = pfn + num_pfn; | ||
318 | |||
319 | page = pfn_to_page(pfn); | ||
320 | memset(page, 0, num_pfn * sizeof(struct page)); | ||
321 | |||
322 | for (; pfn < end_pfn; pfn++) { | ||
323 | page = pfn_to_page(pfn); | ||
324 | init_page_count(page); | ||
325 | reset_page_mapcount(page); | ||
326 | SetPageReserved(page); | ||
327 | INIT_LIST_HEAD(&page->lru); | ||
328 | } | ||
329 | goto out; | ||
330 | |||
331 | out_remove: | ||
332 | __remove_shared_memory(seg); | ||
333 | out_free: | ||
334 | kfree(seg); | ||
335 | out: | ||
336 | mutex_unlock(&vmem_mutex); | ||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * map whole physical memory to virtual memory (identity mapping) | ||
342 | */ | ||
343 | void __init vmem_map_init(void) | ||
344 | { | ||
345 | unsigned long map_size; | ||
346 | int i; | ||
347 | |||
348 | map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page); | ||
349 | vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size); | ||
350 | vmem_map = (struct page *) vmalloc_end; | ||
351 | NODE_DATA(0)->node_mem_map = vmem_map; | ||
352 | |||
353 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) | ||
354 | vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size); | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * Convert memory chunk array to a memory segment list so there is a single | ||
359 | * list that contains both r/w memory and shared memory segments. | ||
360 | */ | ||
361 | static int __init vmem_convert_memory_chunk(void) | ||
362 | { | ||
363 | struct memory_segment *seg; | ||
364 | int i; | ||
365 | |||
366 | mutex_lock(&vmem_mutex); | ||
367 | for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { | ||
368 | if (!memory_chunk[i].size) | ||
369 | continue; | ||
370 | seg = kzalloc(sizeof(*seg), GFP_KERNEL); | ||
371 | if (!seg) | ||
372 | panic("Out of memory...\n"); | ||
373 | seg->start = memory_chunk[i].addr; | ||
374 | seg->size = memory_chunk[i].size; | ||
375 | insert_memory_segment(seg); | ||
376 | } | ||
377 | mutex_unlock(&vmem_mutex); | ||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | core_initcall(vmem_convert_memory_chunk); | ||