author     Linus Torvalds <torvalds@linux-foundation.org>    2011-05-22 15:38:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-05-22 15:38:40 -0400
commit     17238005f377888b9d4583835205c77e3d781bb8
tree       9cad40902138d08a2694f87736cfebd031dc9542 /arch/parisc/mm
parent     34ea646c9f8c18fd2e4332ff3b2b509f878c56f1
parent     7fe2ac6882f74e8bd5b3891f63c09cb10aee9d03
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6:
[PARISC] wire up syncfs syscall
[PARISC] wire up the fhandle syscalls
[PARISC] wire up clock_adjtime syscall
[PARISC] wire up fanotify syscalls
[PARISC] prevent speculative re-read on cache flush
[PARISC] only make executable areas executable
[PARISC] fix pacache .size with new binutils
Diffstat (limited to 'arch/parisc/mm')
-rw-r--r--  arch/parisc/mm/init.c | 260
1 file changed, 143 insertions, 117 deletions
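Within arch/parisc/mm this merge reduces to one change in init.c: map_pages() is moved ahead of free_initmem(), now tracks the virtual address while it walks the page tables, and gains a force argument. Callers mapping ordinary RAM (pagetable_init() and the initrd mapping) pass 0 and let map_pages() pick a protection per page, keeping only core kernel text executable; the gateway page and the free_initmem() remaps pass 1 to get exactly the pgprot they supplied. The sketch below is a condensed userspace model of that per-page decision, intended only as a reading aid for the hunks that follow; choose_prot() and struct page_info are hypothetical names, not kernel code.

```c
/*
 * Illustrative model only (userspace C, hypothetical names).  It condenses
 * the per-page protection choice made by the reworked map_pages() in the
 * first hunk below; the real code builds PTEs with __mk_pte() and the
 * PAGE_KERNEL* protections, and the read-only-data branch is compiled in
 * only on CONFIG_PARISC_PAGE_SIZE_4KB kernels.
 */
#include <stdbool.h>
#include <stdio.h>

enum prot { PROT_REQUESTED, PROT_KERNEL_EXEC, PROT_KERNEL_RO };

struct page_info {
	bool force;        /* caller passed force != 0 (gateway page, init-area remaps) */
	bool core_text;    /* core_kernel_text(vaddr) is true for this page */
	bool fault_vector; /* page holds fault_vector_20; kept writable for the HPMC checksum */
	bool gateway;      /* page holds linux_gateway_page */
	bool ro_data;      /* physical address falls in the _text..data_start range */
};

static enum prot choose_prot(const struct page_info *p)
{
	if (p->force)
		return PROT_REQUESTED;   /* use the caller's pgprot unchanged */
	if (p->core_text && !p->fault_vector)
		return PROT_KERNEL_EXEC; /* only real kernel text stays executable */
	if (p->ro_data && !p->fault_vector && !p->gateway)
		return PROT_KERNEL_RO;   /* read-only data loses write and execute */
	return PROT_REQUESTED;           /* everything else: whatever the caller asked for */
}

int main(void)
{
	struct page_info plain_ram = { 0 };
	struct page_info kernel_text = { .core_text = true };

	printf("plain RAM   -> %d\n", choose_prot(&plain_ram));   /* 0: PROT_REQUESTED */
	printf("kernel text -> %d\n", choose_prot(&kernel_text)); /* 1: PROT_KERNEL_EXEC */
	return 0;
}
```

The force path is what lets gateway_init() keep PAGE_GATEWAY on the gateway page and lets free_initmem() temporarily map the init area PAGE_KERNEL_RWX.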
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index b1d126258dee..5fa1e273006e 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -371,24 +371,158 @@ static void __init setup_bootmem(void)
 	request_resource(&sysram_resources[0], &pdcdata_resource);
 }
 
+static void __init map_pages(unsigned long start_vaddr,
+			     unsigned long start_paddr, unsigned long size,
+			     pgprot_t pgprot, int force)
+{
+	pgd_t *pg_dir;
+	pmd_t *pmd;
+	pte_t *pg_table;
+	unsigned long end_paddr;
+	unsigned long start_pmd;
+	unsigned long start_pte;
+	unsigned long tmp1;
+	unsigned long tmp2;
+	unsigned long address;
+	unsigned long vaddr;
+	unsigned long ro_start;
+	unsigned long ro_end;
+	unsigned long fv_addr;
+	unsigned long gw_addr;
+	extern const unsigned long fault_vector_20;
+	extern void * const linux_gateway_page;
+
+	ro_start = __pa((unsigned long)_text);
+	ro_end = __pa((unsigned long)&data_start);
+	fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
+	gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+
+	end_paddr = start_paddr + size;
+
+	pg_dir = pgd_offset_k(start_vaddr);
+
+#if PTRS_PER_PMD == 1
+	start_pmd = 0;
+#else
+	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
+#endif
+	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+
+	address = start_paddr;
+	vaddr = start_vaddr;
+	while (address < end_paddr) {
+#if PTRS_PER_PMD == 1
+		pmd = (pmd_t *)__pa(pg_dir);
+#else
+		pmd = (pmd_t *)pgd_address(*pg_dir);
+
+		/*
+		 * pmd is physical at this point
+		 */
+
+		if (!pmd) {
+			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
+			pmd = (pmd_t *) __pa(pmd);
+		}
+
+		pgd_populate(NULL, pg_dir, __va(pmd));
+#endif
+		pg_dir++;
+
+		/* now change pmd to kernel virtual addresses */
+
+		pmd = (pmd_t *)__va(pmd) + start_pmd;
+		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
+
+			/*
+			 * pg_table is physical at this point
+			 */
+
+			pg_table = (pte_t *)pmd_address(*pmd);
+			if (!pg_table) {
+				pg_table = (pte_t *)
+					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
+				pg_table = (pte_t *) __pa(pg_table);
+			}
+
+			pmd_populate_kernel(NULL, pmd, __va(pg_table));
+
+			/* now change pg_table to kernel virtual addresses */
+
+			pg_table = (pte_t *) __va(pg_table) + start_pte;
+			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
+				pte_t pte;
+
+				/*
+				 * Map the fault vector writable so we can
+				 * write the HPMC checksum.
+				 */
+				if (force)
+					pte = __mk_pte(address, pgprot);
+				else if (core_kernel_text(vaddr) &&
+					 address != fv_addr)
+					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+				else
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+				if (address >= ro_start && address < ro_end
+						&& address != fv_addr
+						&& address != gw_addr)
+					pte = __mk_pte(address, PAGE_KERNEL_RO);
+				else
+#endif
+					pte = __mk_pte(address, pgprot);
+
+				if (address >= end_paddr) {
+					if (force)
+						break;
+					else
+						pte_val(pte) = 0;
+				}
+
+				set_pte(pg_table, pte);
+
+				address += PAGE_SIZE;
+				vaddr += PAGE_SIZE;
+			}
+			start_pte = 0;
+
+			if (address >= end_paddr)
+				break;
+		}
+		start_pmd = 0;
+	}
+}
+
 void free_initmem(void)
 {
 	unsigned long addr;
 	unsigned long init_begin = (unsigned long)__init_begin;
 	unsigned long init_end = (unsigned long)__init_end;
 
-#ifdef CONFIG_DEBUG_KERNEL
+	/* The init text pages are marked R-X. We have to
+	 * flush the icache and mark them RW-
+	 *
+	 * This is tricky, because map_pages is in the init section.
+	 * Do a dummy remap of the data section first (the data
+	 * section is already PAGE_KERNEL) to pull in the TLB entries
+	 * for map_kernel */
+	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+		  PAGE_KERNEL_RWX, 1);
+	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
+	 * map_pages */
+	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+		  PAGE_KERNEL, 1);
+
+	/* force the kernel to see the new TLB entries */
+	__flush_tlb_range(0, init_begin, init_end);
 	/* Attempt to catch anyone trying to execute code here
 	 * by filling the page with BRK insns.
 	 */
 	memset((void *)init_begin, 0x00, init_end - init_begin);
+	/* finally dump all the instructions which were cached, since the
+	 * pages are no-longer executable */
 	flush_icache_range(init_begin, init_end);
-#endif
 
-	/* align __init_begin and __init_end to page size,
-	   ignoring linker script where we might have tried to save RAM */
-	init_begin = PAGE_ALIGN(init_begin);
-	init_end = PAGE_ALIGN(init_end);
 	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
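One detail worth calling out from the free_initmem() part of the hunk above, as its comments explain: map_pages() itself lives in the init section, so the first, seemingly redundant remap exists to pull map_pages()'s own text into the TLB while it is still executable; the second remap can then drop execute permission on those very pages and still run to completion from the already-cached translations. A condensed, comment-annotated restatement of that sequence (the calls are the same as in the hunk; only the commentary is added):

```c
	/* 1. Dummy pass: remap the init area PAGE_KERNEL_RWX.  The useful
	 *    side effect is that running map_pages() loads TLB entries for
	 *    its own, init-section text while it is still executable.      */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);

	/* 2. Real pass: drop execute, keep read/write.  This still runs,
	 *    because the TLB was pre-primed in step 1.                     */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* 3. Flush the TLB range so the kernel sees the new,
	 *    non-executable translations.                                  */
	__flush_tlb_range(0, init_begin, init_end);

	/* 4. Poison the freed init text with BRK (all-zero) instructions
	 *    to catch anything that still jumps into it.                   */
	memset((void *)init_begin, 0x00, init_end - init_begin);

	/* 5. Drop any cached instructions, since these pages are no
	 *    longer executable.                                            */
	flush_icache_range(init_begin, init_end);
```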
@@ -618,114 +752,6 @@ void show_mem(unsigned int filter)
 #endif
 }
 
-
-static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
-{
-	pgd_t *pg_dir;
-	pmd_t *pmd;
-	pte_t *pg_table;
-	unsigned long end_paddr;
-	unsigned long start_pmd;
-	unsigned long start_pte;
-	unsigned long tmp1;
-	unsigned long tmp2;
-	unsigned long address;
-	unsigned long ro_start;
-	unsigned long ro_end;
-	unsigned long fv_addr;
-	unsigned long gw_addr;
-	extern const unsigned long fault_vector_20;
-	extern void * const linux_gateway_page;
-
-	ro_start = __pa((unsigned long)_text);
-	ro_end = __pa((unsigned long)&data_start);
-	fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
-	gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
-
-	end_paddr = start_paddr + size;
-
-	pg_dir = pgd_offset_k(start_vaddr);
-
-#if PTRS_PER_PMD == 1
-	start_pmd = 0;
-#else
-	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
-	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-
-	address = start_paddr;
-	while (address < end_paddr) {
-#if PTRS_PER_PMD == 1
-		pmd = (pmd_t *)__pa(pg_dir);
-#else
-		pmd = (pmd_t *)pgd_address(*pg_dir);
-
-		/*
-		 * pmd is physical at this point
-		 */
-
-		if (!pmd) {
-			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER);
-			pmd = (pmd_t *) __pa(pmd);
-		}
-
-		pgd_populate(NULL, pg_dir, __va(pmd));
-#endif
-		pg_dir++;
-
-		/* now change pmd to kernel virtual addresses */
-
-		pmd = (pmd_t *)__va(pmd) + start_pmd;
-		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {
-
-			/*
-			 * pg_table is physical at this point
-			 */
-
-			pg_table = (pte_t *)pmd_address(*pmd);
-			if (!pg_table) {
-				pg_table = (pte_t *)
-					alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
-				pg_table = (pte_t *) __pa(pg_table);
-			}
-
-			pmd_populate_kernel(NULL, pmd, __va(pg_table));
-
-			/* now change pg_table to kernel virtual addresses */
-
-			pg_table = (pte_t *) __va(pg_table) + start_pte;
-			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
-				pte_t pte;
-
-				/*
-				 * Map the fault vector writable so we can
-				 * write the HPMC checksum.
-				 */
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-				if (address >= ro_start && address < ro_end
-						&& address != fv_addr
-						&& address != gw_addr)
-					pte = __mk_pte(address, PAGE_KERNEL_RO);
-				else
-#endif
-					pte = __mk_pte(address, pgprot);
-
-				if (address >= end_paddr)
-					pte_val(pte) = 0;
-
-				set_pte(pg_table, pte);
-
-				address += PAGE_SIZE;
-			}
-			start_pte = 0;
-
-			if (address >= end_paddr)
-				break;
-		}
-		start_pmd = 0;
-	}
-}
-
 /*
  * pagetable_init() sets up the page tables
  *
@@ -750,14 +776,14 @@ static void __init pagetable_init(void)
 		size = pmem_ranges[range].pages << PAGE_SHIFT;
 
 		map_pages((unsigned long)__va(start_paddr), start_paddr,
-			size, PAGE_KERNEL);
+			size, PAGE_KERNEL, 0);
 	}
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_end && initrd_end > mem_limit) {
 		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
 		map_pages(initrd_start, __pa(initrd_start),
-			initrd_end - initrd_start, PAGE_KERNEL);
+			initrd_end - initrd_start, PAGE_KERNEL, 0);
 	}
 #endif
 
@@ -782,7 +808,7 @@ static void __init gateway_init(void)
 	 */
 
 	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
-		  PAGE_SIZE, PAGE_GATEWAY);
+		  PAGE_SIZE, PAGE_GATEWAY, 1);
 }
 
 #ifdef CONFIG_HPUX