 arch/arm/mm/mmu.c | 128 ++++++++++++++++++++++++++++++------------------
 1 file changed, 73 insertions(+), 55 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 7f36c825718..9b36c5cb5e9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -17,6 +17,7 @@
 
 #include <asm/cputype.h>
 #include <asm/mach-types.h>
+#include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
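
The newly included <asm/sections.h> pulls in the generic declarations of the linker-script boundary symbols, which is what lets the later hunks drop the explicit address-of operators. The declarations look roughly like this (via asm-generic/sections.h; the exact set of symbols varies by kernel version):

/* Section boundary symbols as seen by C code.  Being arrays, the bare
 * name is already an address: "(unsigned long)_etext" needs no '&',
 * and "_end - _data" is directly a size in bytes. */
extern char _text[], _stext[], _etext[];
extern char _data[], _edata[];
extern char __bss_start[], __bss_stop[];
extern char _end[];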
@@ -646,61 +647,79 @@ static void __init early_vmalloc(char **arg)
 			"vmalloc area too small, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
+
+	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
+		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
+		printk(KERN_WARNING
+			"vmalloc area is too big, limiting to %luMB\n",
+			vmalloc_reserve >> 20);
+	}
 }
 __early_param("vmalloc=", early_vmalloc);
 
 #define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
 
-static int __init check_membank_valid(struct membank *mb)
-{
-	/*
-	 * Check whether this memory region has non-zero size or
-	 * invalid node number.
-	 */
-	if (mb->size == 0 || mb->node >= MAX_NUMNODES)
-		return 0;
-
-	/*
-	 * Check whether this memory region would entirely overlap
-	 * the vmalloc area.
-	 */
-	if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
-		printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
-			"(vmalloc region overlap).\n",
-			mb->start, mb->start + mb->size - 1);
-		return 0;
-	}
-
-	/*
-	 * Check whether this memory region would partially overlap
-	 * the vmalloc area.
-	 */
-	if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
-	    phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
-		unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);
-
-		printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
-			"to -%.8lx (vmalloc region overlap).\n",
-			mb->start, mb->start + mb->size - 1,
-			mb->start + newsize - 1);
-		mb->size = newsize;
-	}
-
-	return 1;
-}
-
-static void __init sanity_check_meminfo(struct meminfo *mi)
-{
-	int i, j;
-
-	for (i = 0, j = 0; i < mi->nr_banks; i++) {
-		if (check_membank_valid(&mi->bank[i]))
-			mi->bank[j++] = mi->bank[i];
+static void __init sanity_check_meminfo(void)
+{
+	int i, j;
+
+	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+		struct membank *bank = &meminfo.bank[j];
+		*bank = meminfo.bank[i];
+
+#ifdef CONFIG_HIGHMEM
+		/*
+		 * Split those memory banks which are partially overlapping
+		 * the vmalloc area greatly simplifying things later.
+		 */
+		if (__va(bank->start) < VMALLOC_MIN &&
+		    bank->size > VMALLOC_MIN - __va(bank->start)) {
+			if (meminfo.nr_banks >= NR_BANKS) {
+				printk(KERN_CRIT "NR_BANKS too low, "
+					"ignoring high memory\n");
+			} else {
+				memmove(bank + 1, bank,
+					(meminfo.nr_banks - i) * sizeof(*bank));
+				meminfo.nr_banks++;
+				i++;
+				bank[1].size -= VMALLOC_MIN - __va(bank->start);
+				bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+				j++;
+			}
+			bank->size = VMALLOC_MIN - __va(bank->start);
+		}
+#else
+		/*
+		 * Check whether this memory bank would entirely overlap
+		 * the vmalloc area.
+		 */
+		if (__va(bank->start) >= VMALLOC_MIN) {
+			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+				"(vmalloc region overlap).\n",
+				bank->start, bank->start + bank->size - 1);
+			continue;
+		}
+
+		/*
+		 * Check whether this memory bank would partially overlap
+		 * the vmalloc area.
+		 */
+		if (__va(bank->start + bank->size) > VMALLOC_MIN ||
+		    __va(bank->start + bank->size) < __va(bank->start)) {
+			unsigned long newsize = VMALLOC_MIN - __va(bank->start);
+			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
+				"to -%.8lx (vmalloc region overlap).\n",
+				bank->start, bank->start + bank->size - 1,
+				bank->start + newsize - 1);
+			bank->size = newsize;
+		}
+#endif
+		j++;
 	}
-	mi->nr_banks = j;
+	meminfo.nr_banks = j;
 }
 
-static inline void prepare_page_table(struct meminfo *mi)
+static inline void prepare_page_table(void)
 {
 	unsigned long addr;
 
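
The CONFIG_HIGHMEM branch is the heart of the change: a bank straddling VMALLOC_MIN is split into a lowmem piece and a highmem piece instead of being truncated. A minimal userspace sketch of the split arithmetic, with mock types and an identity phys/virt mapping standing in for the kernel's __va()/__pa() (illustration only, not kernel code):

#include <stdio.h>

struct mock_bank { unsigned long start, size; };        /* cf. struct membank */

/* Identity mapping stands in for __va()/__pa() to keep the sums visible. */
#define mock_va(pa)     (pa)
#define mock_pa(va)     (va)

static void split_bank(struct mock_bank *bank, struct mock_bank *high,
                       unsigned long vmalloc_min)
{
        if (mock_va(bank->start) < vmalloc_min &&
            bank->size > vmalloc_min - mock_va(bank->start)) {
                /* The part above the boundary becomes its own bank... */
                high->size = bank->size - (vmalloc_min - mock_va(bank->start));
                high->start = mock_pa(vmalloc_min - 1) + 1;
                /* ...and the original bank is clipped at the boundary. */
                bank->size = vmalloc_min - mock_va(bank->start);
        }
}

int main(void)
{
        struct mock_bank bank = { 0x00000000, 0x40000000 };     /* 1GB bank */
        struct mock_bank high = { 0, 0 };

        split_bank(&bank, &high, 0x30000000);   /* pretend VMALLOC_MIN */
        printf("low:  start=%08lx size=%08lx\n", bank.start, bank.size);
        printf("high: start=%08lx size=%08lx\n", high.start, high.size);
        return 0;
}

The in-kernel version performs the same arithmetic in place, memmove()ing the remaining banks up one slot and bumping nr_banks and both loop indices so the split-off high bank survives the compaction pass without being re-split.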
@@ -712,7 +731,7 @@ static inline void prepare_page_table(struct meminfo *mi)
 
 #ifdef CONFIG_XIP_KERNEL
 	/* The XIP kernel is mapped in the module area -- skip over it */
-	addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
 #endif
 	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
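
Only the '&' changes here, but the expression is worth decoding: it is the standard round-up idiom, aligning the end of the XIP kernel text to the next page-directory boundary (PGDIR_SHIFT is 21 on classic ARM, so each pgd entry covers 2MB). A self-contained illustration with mock constants:

#define MOCK_PGDIR_SHIFT        21                      /* 2MB per pgd entry */
#define MOCK_PGDIR_SIZE         (1UL << MOCK_PGDIR_SHIFT)
#define MOCK_PGDIR_MASK         (~(MOCK_PGDIR_SIZE - 1))

/* (x + size - 1) & mask rounds x up: 0x002c5f10 -> 0x00400000. */
static unsigned long pgdir_round_up(unsigned long x)
{
        return (x + MOCK_PGDIR_SIZE - 1) & MOCK_PGDIR_MASK;
}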
@@ -721,7 +740,7 @@ static inline void prepare_page_table(struct meminfo *mi)
 	 * Clear out all the kernel space mappings, except for the first
 	 * memory bank, up to the end of the vmalloc region.
 	 */
-	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
+	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
 	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
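
bank_phys_end() replaces the open-coded start + size. The helper is presumably defined in <asm/setup.h> next to struct membank, along these lines (assumed shape, shown for reference; the in-tree definition may differ):

#define bank_phys_start(bank)   ((bank)->start)
#define bank_phys_end(bank)     ((bank)->start + (bank)->size)
#define bank_phys_size(bank)    ((bank)->size)

Funnelling every end-of-bank computation through one accessor matters once sanity_check_meminfo() can rewrite bank boundaries.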
@@ -738,10 +757,10 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 	 * Note that this can only be in node 0.
 	 */
 #ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start,
+	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
 			BOOTMEM_DEFAULT);
 #else
-	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext,
+	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
 			BOOTMEM_DEFAULT);
 #endif
 
@@ -808,7 +827,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * Allocate the vector page early.
 	 */
 	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
-	BUG_ON(!vectors);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
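
The dropped BUG_ON(!vectors) was unreachable: the panicking bootmem allocators never return NULL. Simplified from mm/bootmem.c of roughly this era (trimmed, for illustration):

void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        void *mem = __alloc_bootmem_nopanic(size, align, goal);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;    /* unreachable */
}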
@@ -820,7 +838,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 #ifdef CONFIG_XIP_KERNEL
 	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
 	map.virtual = MODULES_VADDR;
-	map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 	map.type = MT_ROM;
 	create_mapping(&map);
 #endif
@@ -880,23 +898,23 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
  */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
 	build_mem_type_table();
-	sanity_check_meminfo(mi);
-	prepare_page_table(mi);
-	bootmem_init(mi);
+	sanity_check_meminfo();
+	prepare_page_table();
+	bootmem_init();
 	devicemaps_init(mdesc);
 
 	top_pmd = pmd_off_k(0xffff0000);
 
 	/*
-	 * allocate the zero page.  Note that we count on this going ok.
+	 * allocate the zero page.  Note that this always succeeds and
+	 * returns a zeroed result.
 	 */
 	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
-	memzero(zero_page, PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
 	flush_dcache_page(empty_zero_page);
 }
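
Two observations tie this last hunk together. First, meminfo is now a single global consumed directly by sanity_check_meminfo(), prepare_page_table() and bootmem_init(), so it no longer has to be threaded through every call. Second, the explicit memzero() can go because bootmem hands out memory already cleared (the allocator memsets each block before returning it), exactly as the updated comment says. A hypothetical caller-side sketch, for illustration only; in mainline the global lives in arch/arm/kernel/setup.c and the details differ:

struct meminfo meminfo __initdata;      /* filled in from ATAGs / mem= */

void __init setup_arch(char **cmdline_p)
{
        struct machine_desc *mdesc;

        mdesc = setup_machine(machine_arch_type);
        /* ... parse memory tags into the global meminfo ... */

        paging_init(mdesc);             /* was: paging_init(&meminfo, mdesc) */
}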