author	Rob Herring <rob.herring@calxeda.com>	2012-02-29 19:10:58 -0500
committer	Rob Herring <rob.herring@calxeda.com>	2012-07-25 10:26:42 -0400
commit	c2794437091a4fda72c4a4f3567dd728dcc0c3c9 (patch)
tree	56e4d414f5b78bdf35822b38af5be021711a0970 /arch/arm/mm/mmu.c
parent	701eb2647d7986b42fa973990649a83b3e15e8eb (diff)
ARM: Add fixed PCI i/o mapping
This adds a fixed virtual mapping for PCI i/o addresses. The mapping is
located at the last 2MB of vmalloc region (0xfee00000-0xff000000). 2MB
is used to align with PMD size, but IO_SPACE_LIMIT is 1MB. The space is
reserved after .map_io and can be mapped at any time later with
pci_ioremap_io. Platforms which need early i/o mapping (e.g. for vga
console) can call pci_map_io_early in their .map_io function.

This has changed completely from the 1st implementation which only
supported creating the static mapping at .map_io.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Nicolas Pitre <nico@linaro.org>
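For illustration, a platform would hook into this in one of two ways. Below is
a minimal sketch, assuming the interfaces this series introduces elsewhere
(pci_map_io_early(unsigned long pfn) for the early path and
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) for the normal
path); the EXAMPLE_IO_PHYS address and the example_* function names are
hypothetical:

#include <linux/init.h>
#include <asm/mach/pci.h>
#include <asm/memory.h>

/* Hypothetical physical base of the host bridge's i/o window. */
#define EXAMPLE_IO_PHYS		0x90000000UL

/* Early path: only needed when i/o access must work before PCI probe
 * (e.g. vga console). Creates the static mapping at PCI_IO_VIRT_BASE
 * from the platform's .map_io callback. */
static void __init example_map_io(void)
{
	pci_map_io_early(__phys_to_pfn(EXAMPLE_IO_PHYS));
}

/* Normal path: map a 64K window into the fixed i/o space at any time
 * after .map_io; offset 0 is the start of the reserved region. */
static int __init example_pci_host_setup(void)
{
	return pci_ioremap_io(0, EXAMPLE_IO_PHYS);
}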
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++-----------
1 file changed, 43 insertions(+), 11 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cf4528d51774..714a7fd99ca3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,6 +31,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 
 #include "mm.h"
 
@@ -216,7 +217,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
-	},	
+	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -783,14 +784,27 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
 	}
 }
 
+void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = size;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = caller;
+	vm_area_add_early(vm);
+}
+
 #ifndef CONFIG_ARM_LPAE
 
 /*
@@ -808,14 +822,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 
 static void __init pmd_empty_section_gap(unsigned long addr)
 {
-	struct vm_struct *vm;
-
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
-	vm->addr = (void *)addr;
-	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
-	vm->caller = pmd_empty_section_gap;
-	vm_area_add_early(vm);
+	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
 }
 
 static void __init fill_pmd_gaps(void)
@@ -864,6 +871,28 @@ static void __init fill_pmd_gaps(void)
 #define fill_pmd_gaps() do { } while (0)
 #endif
 
+#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
+static void __init pci_reserve_io(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		addr &= ~(SZ_2M - 1);
+		if (addr == PCI_IO_VIRT_BASE)
+			return;
+
+	}
+	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
+}
+#else
+#define pci_reserve_io() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
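For illustration, the bail-out check in pci_reserve_io() above only rounds each
static mapping's virtual address down to a 2MB boundary and compares the result
with PCI_IO_VIRT_BASE. A standalone sketch of that masking (plain C; the
constants are taken from the patch description, the sample addresses are
hypothetical):

#include <stdio.h>

#define SZ_2M			0x00200000UL
#define PCI_IO_VIRT_BASE	0xfee00000UL	/* last 2MB of vmalloc */

/* Returns 1 if a static mapping at vaddr falls inside the 2MB slot
 * reserved for PCI i/o, i.e. pci_reserve_io() would bail out. */
static int occupies_pci_io_slot(unsigned long vaddr)
{
	return (vaddr & ~(SZ_2M - 1)) == PCI_IO_VIRT_BASE;
}

int main(void)
{
	/* 0xfee80000 rounds down to 0xfee00000: collision, prints 1. */
	printf("%d\n", occupies_pci_io_slot(0xfee80000UL));
	/* 0xfe800000 rounds down to 0xfe800000: no overlap, prints 0. */
	printf("%d\n", occupies_pci_io_slot(0xfe800000UL));
	return 0;
}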
@@ -1147,6 +1176,9 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 		mdesc->map_io();
 	fill_pmd_gaps();
 
+	/* Reserve fixed i/o space in VMALLOC region */
+	pci_reserve_io();
+
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
 	 * consistent state wrt the writebuffer. This also ensures that