Diffstat (limited to 'arch/parisc/mm/init.c')
 arch/parisc/mm/init.c | 264 +++++++++++++++++++++++++++----------------------
 1 file changed, 146 insertions(+), 118 deletions(-)
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b7ed8d7a9b33..5fa1e273006e 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -266,8 +266,10 @@ static void __init setup_bootmem(void)
 	}
 	memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
 
-	for (i = 0; i < npmem_ranges; i++)
+	for (i = 0; i < npmem_ranges; i++) {
+		node_set_state(i, N_NORMAL_MEMORY);
 		node_set_online(i);
+	}
 #endif
 
 	/*
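
The hunk above sets N_NORMAL_MEMORY on each discontiguous-memory node before
onlining it; consumers that iterate nodes via for_each_node_state(nid,
N_NORMAL_MEMORY) would otherwise never see these ranges. A hypothetical
sanity check, not part of the patch (node_online() and node_state() are the
stock nodemask helpers):

	/* Hypothetical debug check, not from this patch: every pmem
	 * range should now be online and flagged N_NORMAL_MEMORY. */
	for (i = 0; i < npmem_ranges; i++)
		BUG_ON(!node_online(i) || !node_state(i, N_NORMAL_MEMORY));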
@@ -369,24 +371,158 @@ static void __init setup_bootmem(void)
 	request_resource(&sysram_resources[0], &pdcdata_resource);
 }
 
+static void __init map_pages(unsigned long start_vaddr,
+			     unsigned long start_paddr, unsigned long size,
+			     pgprot_t pgprot, int force)
+{
+	pgd_t *pg_dir;
+	pmd_t *pmd;
+	pte_t *pg_table;
+	unsigned long end_paddr;
+	unsigned long start_pmd;
+	unsigned long start_pte;
+	unsigned long tmp1;
+	unsigned long tmp2;
+	unsigned long address;
+	unsigned long vaddr;
+	unsigned long ro_start;
+	unsigned long ro_end;
+	unsigned long fv_addr;
+	unsigned long gw_addr;
+	extern const unsigned long fault_vector_20;
+	extern void * const linux_gateway_page;
+
+	ro_start = __pa((unsigned long)_text);
+	ro_end   = __pa((unsigned long)&data_start);
+	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
+	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+
+	end_paddr = start_paddr + size;
+
+	pg_dir = pgd_offset_k(start_vaddr);
+
+#if PTRS_PER_PMD == 1
+	start_pmd = 0;
+#else
+	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+#endif
+	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+
+	address = start_paddr;
+	vaddr = start_vaddr;
+	while (address < end_paddr) {
+#if PTRS_PER_PMD == 1
+		pmd = (pmd_t *)__pa(pg_dir);
+#else
+		pmd = (pmd_t *)pgd_address(*pg_dir);
+
+		/*
+		 * pmd is physical at this point
+		 */
+
+		if (!pmd) {
+			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
+			pmd = (pmd_t *) __pa(pmd);
+		}
+
+		pgd_populate(NULL, pg_dir, __va(pmd));
+#endif
+		pg_dir++;
+
+		/* now change pmd to kernel virtual addresses */
+
+		pmd = (pmd_t *)__va(pmd) + start_pmd;
+		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
+
+			/*
+			 * pg_table is physical at this point
+			 */
+
+			pg_table = (pte_t *)pmd_address(*pmd);
+			if (!pg_table) {
+				pg_table = (pte_t *)
+					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+				pg_table = (pte_t *) __pa(pg_table);
+			}
+
+			pmd_populate_kernel(NULL, pmd, __va(pg_table));
+
+			/* now change pg_table to kernel virtual addresses */
+
+			pg_table = (pte_t *) __va(pg_table) + start_pte;
+			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
+				pte_t pte;
+
+				/*
+				 * Map the fault vector writable so we can
+				 * write the HPMC checksum.
+				 */
+				if (force)
+					pte = __mk_pte(address, pgprot);
+				else if (core_kernel_text(vaddr) &&
+					 address != fv_addr)
+					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+				else
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+				if (address >= ro_start && address < ro_end
+						&& address != fv_addr
+						&& address != gw_addr)
+					pte = __mk_pte(address, PAGE_KERNEL_RO);
+				else
+#endif
+					pte = __mk_pte(address, pgprot);
+
+				if (address >= end_paddr) {
+					if (force)
+						break;
+					else
+						pte_val(pte) = 0;
+				}
+
+				set_pte(pg_table, pte);
+
+				address += PAGE_SIZE;
+				vaddr += PAGE_SIZE;
+			}
+			start_pte = 0;
+
+			if (address >= end_paddr)
+				break;
+		}
+		start_pmd = 0;
+	}
+}
+
 void free_initmem(void)
 {
 	unsigned long addr;
 	unsigned long init_begin = (unsigned long)__init_begin;
 	unsigned long init_end = (unsigned long)__init_end;
 
-#ifdef CONFIG_DEBUG_KERNEL
+	/* The init text pages are marked R-X.  We have to
+	 * flush the icache and mark them RW-
+	 *
+	 * This is tricky, because map_pages is in the init section.
+	 * Do a dummy remap of the data section first (the data
+	 * section is already PAGE_KERNEL) to pull in the TLB entries
+	 * for map_kernel */
+	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+		  PAGE_KERNEL_RWX, 1);
+	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
+	 * map_pages */
+	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+		  PAGE_KERNEL, 1);
+
+	/* force the kernel to see the new TLB entries */
+	__flush_tlb_range(0, init_begin, init_end);
 	/* Attempt to catch anyone trying to execute code here
 	 * by filling the page with BRK insns.
 	 */
 	memset((void *)init_begin, 0x00, init_end - init_begin);
+	/* finally dump all the instructions which were cached, since the
+	 * pages are no-longer executable */
 	flush_icache_range(init_begin, init_end);
-#endif
 
-	/* align __init_begin and __init_end to page size,
-	   ignoring linker script where we might have tried to save RAM */
-	init_begin = PAGE_ALIGN(init_begin);
-	init_end = PAGE_ALIGN(init_end);
 	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
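
The free_initmem() changes above are order-sensitive: map_pages() itself
lives in the init section, so its own translations must already sit in the
TLB before the executable mapping is torn down. A condensed sketch of the
sequence ('len' is shorthand for init_end - init_begin, not a name used by
the patch):

	map_pages(init_begin, __pa(init_begin), len, PAGE_KERNEL_RWX, 1); /* prime the TLB */
	map_pages(init_begin, __pa(init_begin), len, PAGE_KERNEL, 1);     /* drop execute */
	__flush_tlb_range(0, init_begin, init_end); /* expose the new PTEs */
	memset((void *)init_begin, 0x00, len);      /* 0x00 decodes as BRK */
	flush_icache_range(init_begin, init_end);   /* drop stale icache lines */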
@@ -616,114 +752,6 @@ void show_mem(unsigned int filter)
 #endif
 }
 
-
-static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
-{
-	pgd_t *pg_dir;
-	pmd_t *pmd;
-	pte_t *pg_table;
-	unsigned long end_paddr;
-	unsigned long start_pmd;
-	unsigned long start_pte;
-	unsigned long tmp1;
-	unsigned long tmp2;
-	unsigned long address;
-	unsigned long ro_start;
-	unsigned long ro_end;
-	unsigned long fv_addr;
-	unsigned long gw_addr;
-	extern const unsigned long fault_vector_20;
-	extern void * const linux_gateway_page;
-
-	ro_start = __pa((unsigned long)_text);
-	ro_end   = __pa((unsigned long)&data_start);
-	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
-
-	end_paddr = start_paddr + size;
-
-	pg_dir = pgd_offset_k(start_vaddr);
-
-#if PTRS_PER_PMD == 1
-	start_pmd = 0;
-#else
-	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
-	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-
-	address = start_paddr;
-	while (address < end_paddr) {
-#if PTRS_PER_PMD == 1
-		pmd = (pmd_t *)__pa(pg_dir);
-#else
-		pmd = (pmd_t *)pgd_address(*pg_dir);
-
-		/*
-		 * pmd is physical at this point
-		 */
-
-		if (!pmd) {
-			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER);
-			pmd = (pmd_t *) __pa(pmd);
-		}
-
-		pgd_populate(NULL, pg_dir, __va(pmd));
-#endif
-		pg_dir++;
-
-		/* now change pmd to kernel virtual addresses */
-
-		pmd = (pmd_t *)__va(pmd) + start_pmd;
-		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {
-
-			/*
-			 * pg_table is physical at this point
-			 */
-
-			pg_table = (pte_t *)pmd_address(*pmd);
-			if (!pg_table) {
-				pg_table = (pte_t *)
-					alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
-				pg_table = (pte_t *) __pa(pg_table);
-			}
-
-			pmd_populate_kernel(NULL, pmd, __va(pg_table));
-
-			/* now change pg_table to kernel virtual addresses */
-
-			pg_table = (pte_t *) __va(pg_table) + start_pte;
-			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
-				pte_t pte;
-
-				/*
-				 * Map the fault vector writable so we can
-				 * write the HPMC checksum.
-				 */
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-				if (address >= ro_start && address < ro_end
-						&& address != fv_addr
-						&& address != gw_addr)
-					pte = __mk_pte(address, PAGE_KERNEL_RO);
-				else
-#endif
-					pte = __mk_pte(address, pgprot);
-
-				if (address >= end_paddr)
-					pte_val(pte) = 0;
-
-				set_pte(pg_table, pte);
-
-				address += PAGE_SIZE;
-			}
-			start_pte = 0;
-
-			if (address >= end_paddr)
-				break;
-		}
-		start_pmd = 0;
-	}
-}
-
 /*
  * pagetable_init() sets up the page tables
  *
@@ -748,14 +776,14 @@ static void __init pagetable_init(void)
 		size = pmem_ranges[range].pages << PAGE_SHIFT;
 
 		map_pages((unsigned long)__va(start_paddr), start_paddr,
-			size, PAGE_KERNEL);
+			size, PAGE_KERNEL, 0);
 	}
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_end && initrd_end > mem_limit) {
 		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
 		map_pages(initrd_start, __pa(initrd_start),
-			initrd_end - initrd_start, PAGE_KERNEL);
+			initrd_end - initrd_start, PAGE_KERNEL, 0);
 	}
 #endif
 
@@ -780,7 +808,7 @@ static void __init gateway_init(void)
 	 */
 
 	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
-		  PAGE_SIZE, PAGE_GATEWAY);
+		  PAGE_SIZE, PAGE_GATEWAY, 1);
 }
 
 #ifdef CONFIG_HPUX
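
Taken together, the last two hunks show the new calling convention: callers
mapping ordinary RAM pass force=0, letting map_pages() upgrade kernel text
to PAGE_KERNEL_EXEC (and, with 4kB pages, demote the read-only region to
PAGE_KERNEL_RO), while callers that need an exact protection pass force=1.
A sketch, with vaddr/paddr/size standing in for the callers' real arguments:

	map_pages(vaddr, paddr, size, PAGE_KERNEL, 0);  /* RAM: prot heuristics apply */
	map_pages(vaddr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);          /* gateway: prot applied as-is */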