Diffstat (limited to 'arch/arm/mm/mmu.c')

 arch/arm/mm/mmu.c | 65 +++++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 43 insertions(+), 22 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c2fa21d0103e..941dfb9e9a78 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,6 +31,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 
 #include "mm.h"
 
@@ -216,7 +217,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
-	},	
+	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -422,17 +423,6 @@ static void __init build_mem_type_table(void)
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
 	/*
-	 * Enable CPU-specific coherency if supported.
-	 * (Only available on XSC3 at the moment.)
-	 */
-	if (arch_is_coherent() && cpu_is_xsc3()) {
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-	}
-	/*
 	 * ARMv6 and above have extended page tables.
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
@@ -777,14 +767,27 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
 	}
 }
 
+void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = size;
+	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
+	vm->caller = caller;
+	vm_area_add_early(vm);
+}
+
 #ifndef CONFIG_ARM_LPAE
 
 /*
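[Note: the hunk above factors the open-coded vm_struct setup into a reusable helper, vm_reserve_area_early(), so other early-boot callers can mark a chunk of the vmalloc space as occupied without actually mapping it. A minimal sketch of a hypothetical caller follows; the function name, address, and size are illustrative only and not part of this patch (SZ_16M is from the kernel's sizes.h):

/*
 * Hypothetical caller, not part of this patch: keep ioremap()/vmalloc()
 * away from a 16MB window that the board will map by other means later.
 * Passing the function itself as 'caller' mirrors how the patch's own
 * pmd_empty_section_gap() uses the helper below.
 */
static void __init my_board_reserve_window(void)
{
	vm_reserve_area_early(0xf4000000UL, SZ_16M, my_board_reserve_window);
}
]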
@@ -802,14 +805,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 
 static void __init pmd_empty_section_gap(unsigned long addr)
 {
-	struct vm_struct *vm;
-
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
-	vm->addr = (void *)addr;
-	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
-	vm->caller = pmd_empty_section_gap;
-	vm_area_add_early(vm);
+	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
 }
 
 static void __init fill_pmd_gaps(void)
@@ -858,6 +854,28 @@ static void __init fill_pmd_gaps(void)
 #define fill_pmd_gaps() do { } while (0)
 #endif
 
+#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
+static void __init pci_reserve_io(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		addr &= ~(SZ_2M - 1);
+		if (addr == PCI_IO_VIRT_BASE)
+			return;
+
+	}
+	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
+}
+#else
+#define pci_reserve_io() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
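[Note: pci_reserve_io() walks the early vmlist and rounds each static mapping's start address down to its containing 2MB section; if that section is PCI_IO_VIRT_BASE, a machine-specific mapping already covers the fixed i/o window and the generic reservation is skipped. The round-down idiom in isolation, as a standalone userspace sketch (SZ_2M spelled out here since the kernel's sizes.h is not available outside the tree; the example address is hypothetical):

#include <stdio.h>

#define SZ_2M (2UL << 20)

int main(void)
{
	unsigned long addr = 0xfee40000UL;	/* hypothetical mapping start */

	addr &= ~(SZ_2M - 1);		/* round down to a 2MB boundary */
	printf("0x%lx\n", addr);	/* prints 0xfee00000 */
	return 0;
}
]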
@@ -1141,6 +1159,9 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	mdesc->map_io();
 	fill_pmd_gaps();
 
+	/* Reserve fixed i/o space in VMALLOC region */
+	pci_reserve_io();
+
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
 	 * consistent state wrt the writebuffer. This also ensures that
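[Note: the call site matters here. pci_reserve_io() runs after mdesc->map_io(), so the machine's iotable_init() mappings are already on vmlist when the existing-mapping check above executes; called any earlier, it would always reserve a fresh 2MB window even when the platform has mapped the fixed i/o space itself.]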