author		Russell King <rmk+kernel@arm.linux.org.uk>	2012-10-11 05:55:04 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2012-10-11 05:55:04 -0400
commit		a0f0dd57f4a85310d9936f1770a0424b49fef876
tree		2f85b8b67dda13d19b02ca39e0fbef921cb1cf8b	/arch/arm/mm
parent		2a552d5e63d7fa602c9a9a0717008737f55625a6
parent		846a136881b8f73c1f74250bf6acfaa309cab1f2
Merge branch 'fixes' into for-linus
Conflicts: arch/arm/kernel/smp.c
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/alignment.c		  6
-rw-r--r--	arch/arm/mm/cache-l2x0.c	 10
-rw-r--r--	arch/arm/mm/cache-tauros2.c	 83
-rw-r--r--	arch/arm/mm/cache-v7.S		  3
-rw-r--r--	arch/arm/mm/dma-mapping.c	266
-rw-r--r--	arch/arm/mm/init.c		  2
-rw-r--r--	arch/arm/mm/ioremap.c		 15
-rw-r--r--	arch/arm/mm/mmu.c		 65
8 files changed, 343 insertions(+), 107 deletions(-)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 9107231aacc5..b9f60ebe3bc4 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -699,7 +699,6 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
 	unsigned long instr = *pinstr;
 	u16 tinst1 = (instr >> 16) & 0xffff;
 	u16 tinst2 = instr & 0xffff;
-	poffset->un = 0;
 
 	switch (tinst1 & 0xffe0) {
 	/* A6.3.5 Load/Store multiple */
@@ -854,9 +853,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		break;
 
 	case 0x08000000:	/* ldm or stm, or thumb-2 32bit instruction */
-		if (thumb2_32b)
+		if (thumb2_32b) {
+			offset.un = 0;
 			handler = do_alignment_t32_to_handler(&instr, regs, &offset);
-		else
+		} else
 			handler = do_alignment_ldmstm;
 		break;
 
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 2a8e380501e8..8a97e6443c62 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -368,14 +368,18 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		/* l2x0 controller is disabled */
 		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
 
-		l2x0_saved_regs.aux_ctrl = aux;
-
 		l2x0_inv_all();
 
 		/* enable L2X0 */
 		writel_relaxed(1, l2x0_base + L2X0_CTRL);
 	}
 
+	/* Re-read it in case some bits are reserved. */
+	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+
+	/* Save the value for resuming. */
+	l2x0_saved_regs.aux_ctrl = aux;
+
 	outer_cache.inv_range = l2x0_inv_range;
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
@@ -554,7 +558,7 @@ static const struct of_device_id l2x0_ids[] __initconst = {
 int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 {
 	struct device_node *np;
-	struct l2x0_of_data *data;
+	const struct l2x0_of_data *data;
 	struct resource res;
 
 	np = of_find_matching_node(NULL, l2x0_ids);
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index 23a7643e9a87..1be0f4e5e6eb 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -15,8 +15,11 @@
  */
 
 #include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 #include <asm/cacheflush.h>
 #include <asm/cp15.h>
+#include <asm/cputype.h>
 #include <asm/hardware/cache-tauros2.h>
 
 
@@ -144,25 +147,8 @@ static inline void __init write_extra_features(u32 u)
 	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
 }
 
-static void __init disable_l2_prefetch(void)
-{
-	u32 u;
-
-	/*
-	 * Read the CPU Extra Features register and verify that the
-	 * Disable L2 Prefetch bit is set.
-	 */
-	u = read_extra_features();
-	if (!(u & 0x01000000)) {
-		printk(KERN_INFO "Tauros2: Disabling L2 prefetch.\n");
-		write_extra_features(u | 0x01000000);
-	}
-}
-
 static inline int __init cpuid_scheme(void)
 {
-	extern int processor_id;
-
 	return !!((processor_id & 0x000f0000) == 0x000f0000);
 }
 
@@ -189,12 +175,36 @@ static inline void __init write_actlr(u32 actlr)
 	__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
 }
 
-void __init tauros2_init(void)
+static void enable_extra_feature(unsigned int features)
+{
+	u32 u;
+
+	u = read_extra_features();
+
+	if (features & CACHE_TAUROS2_PREFETCH_ON)
+		u &= ~0x01000000;
+	else
+		u |= 0x01000000;
+	printk(KERN_INFO "Tauros2: %s L2 prefetch.\n",
+			(features & CACHE_TAUROS2_PREFETCH_ON)
+			? "Enabling" : "Disabling");
+
+	if (features & CACHE_TAUROS2_LINEFILL_BURST8)
+		u |= 0x00100000;
+	else
+		u &= ~0x00100000;
+	printk(KERN_INFO "Tauros2: %s line fill burt8.\n",
+			(features & CACHE_TAUROS2_LINEFILL_BURST8)
+			? "Enabling" : "Disabling");
+
+	write_extra_features(u);
+}
+
+static void __init tauros2_internal_init(unsigned int features)
 {
-	extern int processor_id;
-	char *mode;
+	char *mode = NULL;
 
-	disable_l2_prefetch();
+	enable_extra_feature(features);
 
 #ifdef CONFIG_CPU_32v5
 	if ((processor_id & 0xff0f0000) == 0x56050000) {
@@ -286,3 +296,34 @@ void __init tauros2_init(void)
 	printk(KERN_INFO "Tauros2: L2 cache support initialised "
 	       "in %s mode.\n", mode);
 }
+
+#ifdef CONFIG_OF
+static const struct of_device_id tauros2_ids[] __initconst = {
+	{ .compatible = "marvell,tauros2-cache"},
+	{}
+};
+#endif
+
+void __init tauros2_init(unsigned int features)
+{
+#ifdef CONFIG_OF
+	struct device_node *node;
+	int ret;
+	unsigned int f;
+
+	node = of_find_matching_node(NULL, tauros2_ids);
+	if (!node) {
+		pr_info("Not found marvell,tauros2-cache, disable it\n");
+		return;
+	}
+
+	ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
+	if (ret) {
+		pr_info("Not found marvell,tauros-cache-features property, "
+			"disable extra features\n");
+		features = 0;
+	} else
+		features = f;
+#endif
+	tauros2_internal_init(features);
+}
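Note on the new interface: tauros2_init() now takes a feature mask, and on DT platforms the "marvell,tauros2-cache-features" property can override it. A minimal caller-side sketch, assuming a hypothetical non-DT board-init function; tauros2_init() and the CACHE_TAUROS2_* flags come from the diff above and are assumed to be declared in <asm/hardware/cache-tauros2.h>:

#include <linux/init.h>
#include <asm/hardware/cache-tauros2.h>

/* Hypothetical non-DT board init: request L2 prefetch and 8-word line fills. */
static void __init example_board_l2_init(void)
{
	tauros2_init(CACHE_TAUROS2_PREFETCH_ON | CACHE_TAUROS2_LINEFILL_BURST8);
}

On a DT platform the caller can simply pass 0 and describe the cache node in the device tree instead.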
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 140b294bbd9b..cd956647c21a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -247,6 +247,9 @@ ENTRY(v7_coherent_user_range)
  * isn't mapped, fail with -EFAULT.
  */
 9001:
+#ifdef CONFIG_ARM_ERRATA_775420
+	dsb
+#endif
 	mov	r0, #-EFAULT
 	mov	pc, lr
  UNWIND(.fnend		)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e59c4ab71bcb..477a2d23ddf1 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -73,11 +73,18 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_cpu_to_dev(page, offset, size, dir);
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
+static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
 /**
  * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -96,7 +103,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
 				      handle & ~PAGE_MASK, size, dir);
 }
@@ -106,8 +113,7 @@ static void arm_dma_sync_single_for_cpu(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!arch_is_coherent())
-		__dma_page_dev_to_cpu(page, offset, size, dir);
+	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_dma_sync_single_for_device(struct device *dev,
@@ -115,8 +121,7 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	if (!arch_is_coherent())
-		__dma_page_cpu_to_dev(page, offset, size, dir);
+	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
 static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
@@ -138,6 +143,22 @@ struct dma_map_ops arm_dma_ops = {
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
+static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
+static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+				  dma_addr_t handle, struct dma_attrs *attrs);
+
+struct dma_map_ops arm_coherent_dma_ops = {
+	.alloc			= arm_coherent_dma_alloc,
+	.free			= arm_coherent_dma_free,
+	.mmap			= arm_dma_mmap,
+	.get_sgtable		= arm_dma_get_sgtable,
+	.map_page		= arm_coherent_dma_map_page,
+	.map_sg			= arm_dma_map_sg,
+	.set_dma_mask		= arm_dma_set_mask,
+};
+EXPORT_SYMBOL(arm_coherent_dma_ops);
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)arm_dma_limit;
@@ -346,6 +367,8 @@ static int __init atomic_pool_init(void)
 		       (unsigned)pool->size / 1024);
 		return 0;
 	}
+
+	kfree(pages);
 no_pages:
 	kfree(bitmap);
 no_bitmap:
@@ -584,7 +607,7 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
-			 gfp_t gfp, pgprot_t prot, const void *caller)
+			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
 {
 	u64 mask = get_coherent_dma_mask(dev);
 	struct page *page;
@@ -617,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
-	if (arch_is_coherent() || nommu())
+	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(size, &page);
@@ -645,7 +668,20 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (dma_alloc_from_coherent(dev, size, handle, &memory))
 		return memory;
 
-	return __dma_alloc(dev, size, handle, gfp, prot,
+	return __dma_alloc(dev, size, handle, gfp, prot, false,
+			   __builtin_return_address(0));
+}
+
+static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+	void *memory;
+
+	if (dma_alloc_from_coherent(dev, size, handle, &memory))
+		return memory;
+
+	return __dma_alloc(dev, size, handle, gfp, prot, true,
 			   __builtin_return_address(0));
 }
 
@@ -682,8 +718,9 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 /*
  * Free a buffer as defined by the above mapping.
  */
-void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
-		  dma_addr_t handle, struct dma_attrs *attrs)
+static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+			   dma_addr_t handle, struct dma_attrs *attrs,
+			   bool is_coherent)
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
@@ -692,7 +729,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 
-	if (arch_is_coherent() || nommu()) {
+	if (is_coherent || nommu()) {
 		__dma_free_buffer(page, size);
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
@@ -708,6 +745,18 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	}
 }
 
+void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
+}
+
+static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+				  dma_addr_t handle, struct dma_attrs *attrs)
+{
+	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
+}
+
 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t handle, size_t size,
 		 struct dma_attrs *attrs)
@@ -1010,11 +1059,12 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 		if (!pages[i])
 			goto error;
 
-		if (order)
+		if (order) {
 			split_page(pages[i], order);
 			j = 1 << order;
 			while (--j)
 				pages[i + j] = pages[i] + j;
+		}
 
 		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
 		i += 1 << order;
@@ -1301,7 +1351,8 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 			  size_t size, dma_addr_t *handle,
-			  enum dma_data_direction dir, struct dma_attrs *attrs)
+			  enum dma_data_direction dir, struct dma_attrs *attrs,
+			  bool is_coherent)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t iova, iova_base;
@@ -1320,8 +1371,8 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
-		if (!arch_is_coherent() &&
+		if (!is_coherent &&
 		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
 		ret = iommu_map(mapping->domain, iova, phys, len, 0);
@@ -1339,20 +1390,9 @@ fail:
 	return ret;
 }
 
-/**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
- */
-int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		     enum dma_data_direction dir, struct dma_attrs *attrs)
+static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+			  enum dma_data_direction dir, struct dma_attrs *attrs,
+			  bool is_coherent)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
 	int i, count = 0;
@@ -1368,7 +1408,7 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
 			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-			    dir, attrs) < 0)
+			    dir, attrs, is_coherent) < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1381,7 +1421,8 @@ int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs) < 0)
+	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+			   is_coherent) < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
@@ -1396,17 +1437,44 @@ bad_mapping:
 }
 
 /**
- * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
  * @dev: valid struct device pointer
  * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
  *
- * Unmap a set of streaming mode DMA translations.  Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
+ * Map a set of i/o coherent buffers described by scatterlist in streaming
+ * mode for DMA. The scatter gather list elements are merged together (if
+ * possible) and tagged with the appropriate dma address and length. They are
+ * obtained via sg_dma_{address,length}.
  */
-void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-			enum dma_data_direction dir, struct dma_attrs *attrs)
+int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
+}
+
+static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
+		bool is_coherent)
 {
 	struct scatterlist *s;
 	int i;
@@ -1415,7 +1483,7 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 		if (sg_dma_len(s))
 			__iommu_remove_mapping(dev, sg_dma_address(s),
 					       sg_dma_len(s));
-		if (!arch_is_coherent() &&
+		if (!is_coherent &&
 		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
 					      s->length, dir);
@@ -1423,6 +1491,38 @@ void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 }
 
 /**
+ * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
+}
+
+/**
  * arm_iommu_sync_sg_for_cpu
  * @dev: valid struct device pointer
  * @sg: list of buffers
@@ -1436,8 +1536,7 @@ void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		if (!arch_is_coherent())
-			__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
 
 }
 
@@ -1455,22 +1554,21 @@ void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i)
-		if (!arch_is_coherent())
-			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }
 
 
 /**
- * arm_iommu_map_page
+ * arm_coherent_iommu_map_page
  * @dev: valid struct device pointer
  * @page: page that buffer resides in
  * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
- * IOMMU aware version of arm_dma_map_page()
+ * Coherent IOMMU aware version of arm_dma_map_page()
  */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
@@ -1478,9 +1576,6 @@ static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	dma_addr_t dma_addr;
 	int ret, len = PAGE_ALIGN(size + offset);
 
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
-		__dma_page_cpu_to_dev(page, offset, size, dir);
-
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
@@ -1496,6 +1591,51 @@ fail:
 }
 
 /**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     struct dma_attrs *attrs)
+{
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+
+	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
+}
+
+/**
+ * arm_coherent_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Coherent IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		struct dma_attrs *attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	int offset = handle & ~PAGE_MASK;
+	int len = PAGE_ALIGN(size + offset);
+
+	if (!iova)
+		return;
+
+	iommu_unmap(mapping->domain, iova, len);
+	__free_iova(mapping, iova, len);
+}
+
+/**
  * arm_iommu_unmap_page
  * @dev: valid struct device pointer
  * @handle: DMA address of buffer
@@ -1517,7 +1657,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 	if (!iova)
 		return;
 
-	if (!arch_is_coherent() && !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 		__dma_page_dev_to_cpu(page, offset, size, dir);
 
 	iommu_unmap(mapping->domain, iova, len);
@@ -1535,8 +1675,7 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 	if (!iova)
 		return;
 
-	if (!arch_is_coherent())
-		__dma_page_dev_to_cpu(page, offset, size, dir);
+	__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
 static void arm_iommu_sync_single_for_device(struct device *dev,
@@ -1570,6 +1709,19 @@ struct dma_map_ops iommu_ops = {
 	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
 };
 
+struct dma_map_ops iommu_coherent_ops = {
+	.alloc		= arm_iommu_alloc_attrs,
+	.free		= arm_iommu_free_attrs,
+	.mmap		= arm_iommu_mmap_attrs,
+	.get_sgtable	= arm_iommu_get_sgtable,
+
+	.map_page	= arm_coherent_iommu_map_page,
+	.unmap_page	= arm_coherent_iommu_unmap_page,
+
+	.map_sg		= arm_coherent_iommu_map_sg,
+	.unmap_sg	= arm_coherent_iommu_unmap_sg,
+};
+
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1663,7 +1815,7 @@ int arm_iommu_attach_device(struct device *dev,
 	dev->archdata.mapping = mapping;
 	set_dma_ops(dev, &iommu_ops);
 
-	pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
+	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
 	return 0;
 }
 
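With arch_is_coherent() gone from these paths, coherency becomes a per-device choice expressed through the ops table. A minimal sketch of how platform code might mark one device as I/O coherent; the probe hook and device pointer are hypothetical, set_dma_ops() and arm_coherent_dma_ops are taken from the diff above, and the extern declaration of arm_coherent_dma_ops is assumed to live in <asm/dma-mapping.h> next to arm_dma_ops:

#include <linux/device.h>
#include <asm/dma-mapping.h>

/* Hypothetical platform hook for a device known to snoop the CPU caches. */
static void example_declare_coherent(struct device *dev)
{
	/*
	 * Streaming mappings through this table skip the
	 * __dma_page_cpu_to_dev()/__dma_page_dev_to_cpu() maintenance, and
	 * coherent allocations come from __alloc_simple_buffer() with a
	 * normal cacheable pgprot.
	 */
	set_dma_ops(dev, &arm_coherent_dma_ops);
}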
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9aec41fa80ae..ad722f1208a5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -324,7 +324,7 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
 
 	BUG_ON(!arm_memblock_steal_permitted);
 
-	phys = memblock_alloc(size, align);
+	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
 	memblock_free(phys, size);
 	memblock_remove(phys, size);
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 566750fa57d4..5dcc2fd46c46 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,6 +36,7 @@
 #include <asm/system_info.h>
 
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 #include "mm.h"
 
 int ioremap_page(unsigned long virt, unsigned long phys,
@@ -247,6 +248,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
+	area->phys_addr = __pfn_to_phys(pfn);
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
@@ -383,3 +385,16 @@ void __arm_iounmap(volatile void __iomem *io_addr)
 	arch_iounmap(io_addr);
 }
 EXPORT_SYMBOL(__arm_iounmap);
+
+#ifdef CONFIG_PCI
+int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+{
+	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+
+	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
+				  PCI_IO_VIRT_BASE + offset + SZ_64K,
+				  phys_addr,
+				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_io);
+#endif
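pci_ioremap_io() maps a host bridge's 64K I/O window into the fixed PCI_IO_VIRT_BASE area that pci_reserve_io() in mmu.c (below) keeps free. A usage sketch under stated assumptions: the host-driver function and physical window address are hypothetical, and only pci_ioremap_io() itself comes from this diff. A second port would pass the next 64K offset within the 2M reservation.

#include <linux/init.h>
#include <asm/mach/pci.h>

/* Hypothetical host-bridge setup: map the first port's I/O window at offset 0. */
static int __init example_pcie_map_io(phys_addr_t io_window_phys)
{
	return pci_ioremap_io(0, io_window_phys);
}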
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c2fa21d0103e..941dfb9e9a78 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -31,6 +31,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/mach/pci.h>
 
 #include "mm.h"
 
@@ -216,7 +217,7 @@ static struct mem_type mem_types[] = {
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
-	},
+	},
 	[MT_DEVICE_WC] = {	/* ioremap_wc */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
@@ -422,17 +423,6 @@ static void __init build_mem_type_table(void)
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
 	/*
-	 * Enable CPU-specific coherency if supported.
-	 * (Only available on XSC3 at the moment.)
-	 */
-	if (arch_is_coherent() && cpu_is_xsc3()) {
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-	}
-	/*
 	 * ARMv6 and above have extended page tables.
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
@@ -777,14 +767,27 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
 	}
 }
 
+void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
+				  void *caller)
+{
+	struct vm_struct *vm;
+
+	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm->addr = (void *)addr;
+	vm->size = size;
+	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
+	vm->caller = caller;
+	vm_area_add_early(vm);
+}
+
 #ifndef CONFIG_ARM_LPAE
 
 /*
@@ -802,14 +805,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 
 static void __init pmd_empty_section_gap(unsigned long addr)
 {
-	struct vm_struct *vm;
-
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
-	vm->addr = (void *)addr;
-	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
-	vm->caller = pmd_empty_section_gap;
-	vm_area_add_early(vm);
+	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
 }
 
 static void __init fill_pmd_gaps(void)
@@ -858,6 +854,28 @@ static void __init fill_pmd_gaps(void)
 #define fill_pmd_gaps() do { } while (0)
 #endif
 
+#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
+static void __init pci_reserve_io(void)
+{
+	struct vm_struct *vm;
+	unsigned long addr;
+
+	/* we're still single threaded hence no lock needed here */
+	for (vm = vmlist; vm; vm = vm->next) {
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		addr = (unsigned long)vm->addr;
+		addr &= ~(SZ_2M - 1);
+		if (addr == PCI_IO_VIRT_BASE)
+			return;
+
+	}
+	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
+}
+#else
+#define pci_reserve_io() do { } while (0)
+#endif
+
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
@@ -1141,6 +1159,9 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 		mdesc->map_io();
 	fill_pmd_gaps();
 
+	/* Reserve fixed i/o space in VMALLOC region */
+	pci_reserve_io();
+
 	/*
 	 * Finally flush the caches and tlb to ensure that we're in a
 	 * consistent state wrt the writebuffer.  This also ensures that