-rw-r--r--	Documentation/arm/memory.txt	3
-rw-r--r--	arch/arm/include/asm/io.h	8
-rw-r--r--	arch/arm/include/asm/mach/map.h	8
-rw-r--r--	arch/arm/include/asm/mach/pci.h	10
-rw-r--r--	arch/arm/kernel/bios32.c	13
-rw-r--r--	arch/arm/mm/ioremap.c	14
-rw-r--r--	arch/arm/mm/mmu.c	54
7 files changed, 99 insertions, 11 deletions
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 208a2d465b92..4bfb9ffbdbc1 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -51,6 +51,9 @@ ffc00000 ffefffff DMA memory mapping region. Memory returned
 ff000000	ffbfffff	Reserved for future expansion of DMA
 			mapping region.
 
+fee00000	feffffff	Mapping of PCI I/O space. This is a static
+			mapping within the vmalloc space.
+
 VMALLOC_START	VMALLOC_END-1	vmalloc() / ioremap() space.
 			Memory returned by vmalloc/ioremap will
 			be dynamically placed in this region.
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 815c669fec0a..8f4db67533e5 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -113,11 +113,19 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 #define __iowmb()		do { } while (0)
 #endif
 
+/* PCI fixed i/o mapping */
+#define PCI_IO_VIRT_BASE	0xfee00000
+
+extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+
 /*
  * Now, pick up the machine-defined IO definitions
  */
 #ifdef CONFIG_NEED_MACH_IO_H
 #include <mach/io.h>
+#elif defined(CONFIG_PCI)
+#define IO_SPACE_LIMIT	((resource_size_t)0xfffff)
+#define __io(a)		__typesafe_io(PCI_IO_VIRT_BASE + ((a) & IO_SPACE_LIMIT))
 #else
 #define __io(a)		__typesafe_io((a) & IO_SPACE_LIMIT)
 #endif
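
With CONFIG_PCI set and no mach/io.h, I/O port accessors now resolve to plain loads and stores inside the fixed window at PCI_IO_VIRT_BASE. A minimal sketch of what a port access ends up doing under these definitions; the driver name and port number below are hypothetical, not part of the patch:

	#include <linux/io.h>

	#define MYDEV_STATUS_PORT	0x1f7	/* hypothetical legacy I/O port */

	static u8 mydev_read_status(void)
	{
		/*
		 * inb() goes through __io(), so this reads the byte at
		 * PCI_IO_VIRT_BASE + (0x1f7 & IO_SPACE_LIMIT), i.e. an
		 * address inside the statically mapped PCI I/O window.
		 */
		return inb(MYDEV_STATUS_PORT);
	}
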
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index a6efcdd6fd25..195ac2f9d3d3 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -9,6 +9,9 @@
  *
  *  Page table mapping constructs and function prototypes
  */
+#ifndef __ASM_MACH_MAP_H
+#define __ASM_MACH_MAP_H
+
 #include <asm/io.h>
 
 struct map_desc {
@@ -34,6 +37,8 @@ struct map_desc {
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
+extern void vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller);
 
 struct mem_type;
 extern const struct mem_type *get_mem_type(unsigned int type);
@@ -44,4 +49,7 @@ extern int ioremap_page(unsigned long virt, unsigned long phys,
 				  const struct mem_type *mtype);
 #else
 #define iotable_init(map,num)	do { } while (0)
+#define vm_reserve_area_early(a,s,c)	do { } while (0)
+#endif
+
 #endif
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 26c511fddf8f..df818876fa31 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -11,6 +11,7 @@
 #ifndef __ASM_MACH_PCI_H
 #define __ASM_MACH_PCI_H
 
+
 struct pci_sys_data;
 struct pci_ops;
 struct pci_bus;
@@ -55,6 +56,15 @@ struct pci_sys_data {
 void pci_common_init(struct hw_pci *);
 
 /*
+ * Setup early fixed I/O mapping.
+ */
+#if defined(CONFIG_PCI)
+extern void pci_map_io_early(unsigned long pfn);
+#else
+static inline void pci_map_io_early(unsigned long pfn) {}
+#endif
+
+/*
  * PCI controllers
  */
 extern struct pci_ops iop3xx_ops;
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 25552508c3fd..c3165f0fef63 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -13,6 +13,7 @@
 #include <linux/io.h>
 
 #include <asm/mach-types.h>
+#include <asm/mach/map.h>
 #include <asm/mach/pci.h>
 
 static int debug_pci;
@@ -627,3 +628,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
 	return 0;
 }
+
+void __init pci_map_io_early(unsigned long pfn)
+{
+	struct map_desc pci_io_desc = {
+		.virtual	= PCI_IO_VIRT_BASE,
+		.type		= MT_DEVICE,
+		.length		= SZ_64K,
+	};
+
+	pci_io_desc.pfn = pfn;
+	iotable_init(&pci_io_desc, 1);
+}
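
Platforms that need the I/O window in place before the PCI bus is probed can create it from their machine's ->map_io() hook. A hedged sketch; the machine name and the physical address of the host bridge's I/O space are made up for illustration:

	/* Sketch only: MYPLAT_PCI_IO_PHYS is a hypothetical physical base. */
	#define MYPLAT_PCI_IO_PHYS	0xf0000000UL

	static void __init myplat_map_io(void)
	{
		/* Statically map 64K of PCI I/O space at PCI_IO_VIRT_BASE. */
		pci_map_io_early(__phys_to_pfn(MYPLAT_PCI_IO_PHYS));
	}
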
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 4f55f5062ab7..8727802f8661 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,6 +36,7 @@
 #include <asm/system_info.h>
 
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 #include "mm.h"
 
 int ioremap_page(unsigned long virt, unsigned long phys,
@@ -383,3 +384,16 @@ void __arm_iounmap(volatile void __iomem *io_addr)
 	arch_iounmap(io_addr);
 }
 EXPORT_SYMBOL(__arm_iounmap);
+
+#ifdef CONFIG_PCI
+int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+{
+	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+
+	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
+				  PCI_IO_VIRT_BASE + offset + SZ_64K,
+				  phys_addr,
+				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_io);
+#endif
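
Host controller drivers that discover their I/O aperture at runtime can instead map it into the fixed virtual area with pci_ioremap_io(). A minimal usage sketch; the helper name and the physical aperture address are assumptions, not part of the patch:

	/*
	 * Sketch: map the first 64K of a (hypothetical) host bridge I/O
	 * aperture. Offset 0 corresponds to port addresses 0x0000-0xffff;
	 * a second controller could use offset SZ_64K, and so on, up to
	 * IO_SPACE_LIMIT.
	 */
	static int __init myplat_pci_map_io(phys_addr_t io_phys)
	{
		return pci_ioremap_io(0, io_phys);
	}
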
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cf4528d51774..714a7fd99ca3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,6 +31,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 
 #include "mm.h"
 
@@ -216,7 +217,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
-	},	
+	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -783,14 +784,27 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
 	}
 }
 
+void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = size;
+	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->caller = caller;
+	vm_area_add_early(vm);
+}
+
 #ifndef CONFIG_ARM_LPAE
 
 /*
@@ -808,14 +822,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 
 static void __init pmd_empty_section_gap(unsigned long addr)
 {
-	struct vm_struct *vm;
-
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
-	vm->addr = (void *)addr;
-	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
-	vm->caller = pmd_empty_section_gap;
-	vm_area_add_early(vm);
+	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
 }
 
 static void __init fill_pmd_gaps(void)
@@ -864,6 +871,28 @@ static void __init fill_pmd_gaps(void)
 #define fill_pmd_gaps() do { } while (0)
 #endif
 
+#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
+static void __init pci_reserve_io(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		addr &= ~(SZ_2M - 1);
+		if (addr == PCI_IO_VIRT_BASE)
+			return;
+
+	}
+	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
+}
+#else
+#define pci_reserve_io() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1147,6 +1176,9 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	mdesc->map_io();
 	fill_pmd_gaps();
 
+	/* Reserve fixed i/o space in VMALLOC region */
+	pci_reserve_io();
+
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
 	 * consistent state wrt the writebuffer. This also ensures that