Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/ioremap.c | 14
-rw-r--r--  arch/arm/mm/mmu.c     | 54
2 files changed, 57 insertions(+), 11 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 566750fa57d4..9d869f93a3da 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,6 +36,7 @@
 #include <asm/system_info.h>
 
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 #include "mm.h"
 
 int ioremap_page(unsigned long virt, unsigned long phys,
@@ -383,3 +384,16 @@ void __arm_iounmap(volatile void __iomem *io_addr)
 	arch_iounmap(io_addr);
 }
 EXPORT_SYMBOL(__arm_iounmap);
+
+#ifdef CONFIG_PCI
+int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+{
+	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+
+	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
+				  PCI_IO_VIRT_BASE + offset + SZ_64K,
+				  phys_addr,
+				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_io);
+#endif
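
The new pci_ioremap_io() maps one 64K window of PCI I/O space at a fixed virtual address, PCI_IO_VIRT_BASE + offset, so port accessors can resolve against a single constant base regardless of platform. A minimal caller sketch, assuming a hypothetical host bridge whose I/O aperture sits at physical address 0x90000000 (the address and function name are illustrative, not part of this patch):

#include <linux/init.h>
#include <linux/types.h>
#include <asm/mach/pci.h>

/* Hypothetical host bridge setup: wire its 64K I/O aperture to the
 * first window of the fixed PCI i/o space (offset 0).
 */
static int __init foo_pci_map_io(void)
{
	phys_addr_t io_phys = 0x90000000;	/* made-up aperture address */

	return pci_ioremap_io(0, io_phys);
}

A second bridge window would pass offset SZ_64K; the BUG_ON in pci_ioremap_io() keeps offset + SZ_64K within IO_SPACE_LIMIT.
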
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c2fa21d0103e..18144e6a3115 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,6 +31,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 
 #include "mm.h"
 
@@ -216,7 +217,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
-	},	
+	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -777,14 +778,27 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
 	}
 }
 
+void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = size;
+	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
+	vm->caller = caller;
+	vm_area_add_early(vm);
+}
+
 #ifndef CONFIG_ARM_LPAE
 
 /*
@@ -802,14 +816,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 
 static void __init pmd_empty_section_gap(unsigned long addr)
 {
-	struct vm_struct *vm;
-
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
-	vm->addr = (void *)addr;
-	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
-	vm->caller = pmd_empty_section_gap;
-	vm_area_add_early(vm);
+	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
 }
 
 static void __init fill_pmd_gaps(void)
@@ -858,6 +865,28 @@ static void __init fill_pmd_gaps(void)
 #define fill_pmd_gaps() do { } while (0)
 #endif
 
+#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
+static void __init pci_reserve_io(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		addr &= ~(SZ_2M - 1);
+		if (addr == PCI_IO_VIRT_BASE)
+			return;
+
+	}
+	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
+}
+#else
+#define pci_reserve_io() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1141,6 +1170,9 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 		mdesc->map_io();
 	fill_pmd_gaps();
 
+	/* Reserve fixed i/o space in VMALLOC region */
+	pci_reserve_io();
+
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
 	 * consistent state wrt the writebuffer.  This also ensures that
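
The reservation logic in pci_reserve_io() relies on rounding: a platform whose iotable already placed a static mapping inside the 2M slot at PCI_IO_VIRT_BASE has effectively claimed that slot, so masking each static mapping's virtual address down to a 2M boundary and comparing against PCI_IO_VIRT_BASE detects the case and skips the reservation. A standalone sketch of that check, assuming PCI_IO_VIRT_BASE is 0xfee00000 as in this series (the sample mapping address is made up):

#include <stdio.h>

#define SZ_2M            0x00200000UL
#define PCI_IO_VIRT_BASE 0xfee00000UL	/* assumed value from this series */

int main(void)
{
	unsigned long vm_addr = 0xfee10000UL;	/* made-up static mapping */
	unsigned long slot = vm_addr & ~(SZ_2M - 1);

	/* 0xfee10000 & ~0x1fffff == 0xfee00000: the slot is already
	 * occupied, so pci_reserve_io() would return without reserving */
	printf("slot %#lx: %s\n", slot,
	       slot == PCI_IO_VIRT_BASE ? "already mapped" : "reserve 2M");
	return 0;
}

Only 64K of the slot is usable as I/O space per window; reserving the full 2M keeps the region section-aligned so no page tables need to be allocated for it at reservation time.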