Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c | 74
1 file changed, 74 insertions, 0 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e5dad60b558b..cf4528d51774 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 	}
 }
 
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h). However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings. This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = SECTION_SIZE;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = pmd_empty_section_gap;
+	vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr, next = 0;
+	pmd_t *pmd;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		if (addr < next)
+			continue;
+
+		/*
+		 * Check if this vm starts on an odd section boundary.
+		 * If so and the first section entry for this PMD is free
+		 * then we block the corresponding virtual address.
+		 */
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr);
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr & PMD_MASK);
+		}
+
+		/*
+		 * Then check if this vm ends on an odd section boundary.
+		 * If so and the second section entry for this PMD is empty
+		 * then we block the corresponding virtual address.
+		 */
+		addr += vm->size;
+		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+			pmd = pmd_off_k(addr) + 1;
+			if (pmd_none(*pmd))
+				pmd_empty_section_gap(addr);
+		}
+
+		/* no need to look at any vm entry until we hit the next PMD */
+		next = (addr + PMD_SIZE - 1) & PMD_MASK;
+	}
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
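The crux of the hunk above is the test (addr & ~PMD_MASK) == SECTION_SIZE, applied once to each static mapping's start address and once to its end. Below is a minimal userspace sketch, not kernel code: the constants mirror the 2-level layout described in the comment (1MB sections, 2MB PMDs), and the 0xf8100000 mapping address is a hypothetical example. It shows which half of a PMD each check would block:

#include <stdio.h>

/* constants mirroring the ARM 2-level values: 1MB sections, 2MB PMDs */
#define SECTION_SHIFT	20
#define SECTION_SIZE	(1UL << SECTION_SHIFT)
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

/* true when addr sits on the second (odd) 1MB half of its 2MB PMD */
static int on_odd_section(unsigned long addr)
{
	return (addr & ~PMD_MASK) == SECTION_SIZE;
}

int main(void)
{
	/* hypothetical 1MB static mapping starting mid-PMD */
	unsigned long addr = 0xf8100000UL;
	unsigned long size = SECTION_SIZE;

	/*
	 * First check: the mapping starts on an odd boundary, so the
	 * bottom half of the PMD at 0xf8000000 is unused and would be
	 * blocked with a dummy vm entry.
	 */
	if (on_odd_section(addr))
		printf("block first half at 0x%lx\n", addr & PMD_MASK);

	/*
	 * Second check: this mapping ends at 0xf8200000, an even
	 * boundary, so no second dummy entry is needed here.
	 */
	addr += size;
	if (on_odd_section(addr))
		printf("block second half at 0x%lx\n", addr);

	return 0;
}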
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 */
 	if (mdesc->map_io)
 		mdesc->map_io();
+	fill_pmd_gaps();
 
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
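Placement matters in this second hunk: fill_pmd_gaps() runs right after mdesc->map_io(), i.e. once every machine-specific static mapping has been created, and while boot is still single-threaded, which is why the vmlist walk above needs no locking. Note also that the dummy entries reserve virtual address space only; pmd_empty_section_gap() writes no page-table entries, so the cost is one early-allocated vm_struct per half-used PMD.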