Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig              |   7
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  |  49
-rw-r--r--  arch/arm/mm/cache-tauros2.c      |  29
-rw-r--r--  arch/arm/mm/dma-mapping.c        | 147
-rw-r--r--  arch/arm/mm/dump.c               |  47
-rw-r--r--  arch/arm/mm/init.c               |   2
-rw-r--r--  arch/arm/mm/mmu.c                |  10
-rw-r--r--  arch/arm/mm/proc-macros.S        |  19
-rw-r--r--  arch/arm/mm/proc-v7-2level.S     |   7
-rw-r--r--  arch/arm/mm/proc-v7.S            |  11
10 files changed, 226 insertions(+), 102 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 1f8fed94c2a4..f5ad9ee70426 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -264,7 +264,7 @@ config CPU_ARM1026
 
 # SA110
 config CPU_SA110
-	bool "Support StrongARM(R) SA-110 processor" if ARCH_RPC
+	bool
 	select CPU_32v3 if ARCH_RPC
 	select CPU_32v4 if !ARCH_RPC
 	select CPU_ABRT_EV4
@@ -446,7 +446,6 @@ config CPU_32v5
 
 config CPU_32v6
 	bool
-	select CPU_USE_DOMAINS if CPU_V6 && MMU
 	select TLS_REG_EMUL if !CPU_32v6K && !MMU
 
 config CPU_32v6K
@@ -671,7 +670,7 @@ config ARM_VIRT_EXT
 
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
-	depends on !CPU_USE_DOMAINS && CPU_V7
+	depends on CPU_V7
 	default y if SMP
 	select HAVE_PROC_CPU if PROC_FS
 	help
@@ -855,7 +854,7 @@ config OUTER_CACHE_SYNC
 
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
-	depends on ARCH_KIRKWOOD || ARCH_MV78XX0
+	depends on ARCH_KIRKWOOD || ARCH_MV78XX0 || ARCH_MVEBU
 	default y
 	select OUTER_CACHE
 	help
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 48bc3c0a87ce..dc814a548056 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,10 +13,15 @@
  */
 
 #include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/highmem.h>
+#include <linux/io.h>
 #include <asm/cacheflush.h>
 #include <asm/cp15.h>
-#include <plat/cache-feroceon-l2.h>
+#include <asm/hardware/cache-feroceon-l2.h>
+
+#define L2_WRITETHROUGH_KIRKWOOD	BIT(4)
 
 /*
  * Low-level cache maintenance operations.
@@ -331,7 +336,9 @@ static void __init enable_l2(void)
 		enable_icache();
 		if (d)
 			enable_dcache();
-	}
+	} else
+		pr_err(FW_BUG
+		       "Feroceon L2: bootloader left the L2 cache on!\n");
 }
 
 void __init feroceon_l2_init(int __l2_wt_override)
@@ -350,3 +357,41 @@ void __init feroceon_l2_init(int __l2_wt_override)
 	printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n",
 	       l2_wt_override ? ", in WT override mode" : "");
 }
+#ifdef CONFIG_OF
+static const struct of_device_id feroceon_ids[] __initconst = {
+	{ .compatible = "marvell,kirkwood-cache"},
+	{ .compatible = "marvell,feroceon-cache"},
+	{}
+};
+
+int __init feroceon_of_init(void)
+{
+	struct device_node *node;
+	void __iomem *base;
+	bool l2_wt_override = false;
+	struct resource res;
+
+#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
+	l2_wt_override = true;
+#endif
+
+	node = of_find_matching_node(NULL, feroceon_ids);
+	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
+		if (of_address_to_resource(node, 0, &res))
+			return -ENODEV;
+
+		base = ioremap(res.start, resource_size(&res));
+		if (!base)
+			return -ENOMEM;
+
+		if (l2_wt_override)
+			writel(readl(base) | L2_WRITETHROUGH_KIRKWOOD, base);
+		else
+			writel(readl(base) & ~L2_WRITETHROUGH_KIRKWOOD, base);
+	}
+
+	feroceon_l2_init(l2_wt_override);
+
+	return 0;
+}
+#endif
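
Note: feroceon_of_init() replaces direct feroceon_l2_init() calls from board files with a DT probe. A minimal sketch of a caller, assuming an early platform init hook (the hook name and initcall level below are illustrative, not part of this patch):

#include <linux/init.h>
#include <asm/hardware/cache-feroceon-l2.h>

/* Hypothetical caller: probe the DT for a Feroceon/Kirkwood L2 node.
 * For "marvell,kirkwood-cache" the write-through bit (bit 4 of the CPU
 * config register) is set or cleared first; either way the probe ends
 * by calling feroceon_l2_init().
 */
static int __init board_l2_init(void)
{
	return feroceon_of_init();
}
early_initcall(board_l2_init);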
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index 1be0f4e5e6eb..b273739e6359 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -33,7 +33,7 @@
  * outer cache operations into the kernel image if the kernel has been
  * configured to support a pre-v7 CPU.
  */
-#if __LINUX_ARM_ARCH__ < 7
+#ifdef CONFIG_CPU_32v5
 /*
  * Low-level cache maintenance operations.
  */
@@ -229,33 +229,6 @@ static void __init tauros2_internal_init(unsigned int features)
 	}
 #endif
 
-#ifdef CONFIG_CPU_32v6
-	/*
-	 * Check whether this CPU lacks support for the v7 hierarchical
-	 * cache ops. (PJ4 is in its v6 personality mode if the MMFR3
-	 * register indicates no support for the v7 hierarchical cache
-	 * ops.)
-	 */
-	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
-		/*
-		 * When Tauros2 is used in an ARMv6 system, the L2
-		 * enable bit is in the ARMv6 ARM-mandated position
-		 * (bit [26] of the System Control Register).
-		 */
-		if (!(get_cr() & 0x04000000)) {
-			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
-			adjust_cr(0x04000000, 0x04000000);
-		}
-
-		mode = "ARMv6";
-		outer_cache.inv_range = tauros2_inv_range;
-		outer_cache.clean_range = tauros2_clean_range;
-		outer_cache.flush_range = tauros2_flush_range;
-		outer_cache.disable = tauros2_disable;
-		outer_cache.resume = tauros2_resume;
-	}
-#endif
-
 #ifdef CONFIG_CPU_32v7
 	/*
 	 * Check whether this CPU has support for the v7 hierarchical
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 11b3914660d2..f62aa0677e5c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -284,9 +284,6 @@ static void __dma_free_buffer(struct page *page, size_t size)
 }
 
 #ifdef CONFIG_MMU
-#ifdef CONFIG_HUGETLB_PAGE
-#warning ARM Coherent DMA allocator does not (yet) support huge TLB
-#endif
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
 				     pgprot_t prot, struct page **ret_page,
@@ -1069,6 +1066,8 @@ fs_initcall(dma_debug_do_init);
 
 /* IOMMU */
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
+
 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 				      size_t size)
 {
@@ -1076,41 +1075,87 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
 	unsigned int align = 0;
 	unsigned int count, start;
 	unsigned long flags;
+	dma_addr_t iova;
+	int i;
 
 	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
 		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
 
-	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
-		 (1 << mapping->order) - 1) >> mapping->order;
-
-	if (order > mapping->order)
-		align = (1 << (order - mapping->order)) - 1;
+	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	align = (1 << order) - 1;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-					   count, align);
-	if (start > mapping->bits) {
-		spin_unlock_irqrestore(&mapping->lock, flags);
-		return DMA_ERROR_CODE;
+	for (i = 0; i < mapping->nr_bitmaps; i++) {
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits)
+			continue;
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+		break;
 	}
 
-	bitmap_set(mapping->bitmap, start, count);
+	/*
+	 * No unused range found. Try to extend the existing mapping
+	 * and perform a second attempt to reserve an IO virtual
+	 * address range of size bytes.
+	 */
+	if (i == mapping->nr_bitmaps) {
+		if (extend_iommu_mapping(mapping)) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
+				mapping->bits, 0, count, align);
+
+		if (start > mapping->bits) {
+			spin_unlock_irqrestore(&mapping->lock, flags);
+			return DMA_ERROR_CODE;
+		}
+
+		bitmap_set(mapping->bitmaps[i], start, count);
+	}
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
-	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+	iova = mapping->base + (mapping->size * i);
+	iova += start << PAGE_SHIFT;
+
+	return iova;
 }
 
 static inline void __free_iova(struct dma_iommu_mapping *mapping,
 			       dma_addr_t addr, size_t size)
 {
-	unsigned int start = (addr - mapping->base) >>
-			     (mapping->order + PAGE_SHIFT);
-	unsigned int count = ((size >> PAGE_SHIFT) +
-			      (1 << mapping->order) - 1) >> mapping->order;
+	unsigned int start, count;
 	unsigned long flags;
+	dma_addr_t bitmap_base;
+	u32 bitmap_index;
+
+	if (!size)
+		return;
+
+	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping->size;
+	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
+
+	bitmap_base = mapping->base + mapping->size * bitmap_index;
+
+	start = (addr - bitmap_base) >> PAGE_SHIFT;
+
+	if (addr + size > bitmap_base + mapping->size) {
+		/*
+		 * The address range to be freed reaches into the iova
+		 * range of the next bitmap. This should not happen as
+		 * we don't allow this in __alloc_iova (at the
+		 * moment).
+		 */
+		BUG();
+	} else
+		count = size >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	bitmap_clear(mapping->bitmap, start, count);
+	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
@@ -1875,8 +1920,7 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
  * @base: start address of the valid IO address space
- * @size: size of the valid IO address space
- * @order: accuracy of the IO addresses allocations
+ * @size: maximum size of the valid IO address space
  *
  * Creates a mapping structure which holds information about used/unused
  * IO address ranges, which is required to perform memory allocation and
@@ -1886,38 +1930,54 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_attach_device function.
  */
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
-			 int order)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
 {
-	unsigned int count = size >> (PAGE_SHIFT + order);
-	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	unsigned int bits = size >> PAGE_SHIFT;
+	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
 	struct dma_iommu_mapping *mapping;
+	int extensions = 1;
 	int err = -ENOMEM;
 
-	if (!count)
+	if (!bitmap_size)
 		return ERR_PTR(-EINVAL);
 
+	if (bitmap_size > PAGE_SIZE) {
+		extensions = bitmap_size / PAGE_SIZE;
+		bitmap_size = PAGE_SIZE;
+	}
+
 	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!mapping->bitmap)
+	mapping->bitmap_size = bitmap_size;
+	mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *),
+				GFP_KERNEL);
+	if (!mapping->bitmaps)
 		goto err2;
 
+	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmaps[0])
+		goto err3;
+
+	mapping->nr_bitmaps = 1;
+	mapping->extensions = extensions;
 	mapping->base = base;
+	mapping->size = bitmap_size << PAGE_SHIFT;
 	mapping->bits = BITS_PER_BYTE * bitmap_size;
-	mapping->order = order;
+
 	spin_lock_init(&mapping->lock);
 
 	mapping->domain = iommu_domain_alloc(bus);
 	if (!mapping->domain)
-		goto err3;
+		goto err4;
 
 	kref_init(&mapping->kref);
 	return mapping;
+err4:
+	kfree(mapping->bitmaps[0]);
 err3:
-	kfree(mapping->bitmap);
+	kfree(mapping->bitmaps);
 err2:
 	kfree(mapping);
 err:
@@ -1927,14 +1987,35 @@ EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
 
 static void release_iommu_mapping(struct kref *kref)
 {
+	int i;
 	struct dma_iommu_mapping *mapping =
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	for (i = 0; i < mapping->nr_bitmaps; i++)
+		kfree(mapping->bitmaps[i]);
+	kfree(mapping->bitmaps);
 	kfree(mapping);
 }
 
+static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+{
+	int next_bitmap;
+
+	if (mapping->nr_bitmaps > mapping->extensions)
+		return -EINVAL;
+
+	next_bitmap = mapping->nr_bitmaps;
+	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
+						GFP_ATOMIC);
+	if (!mapping->bitmaps[next_bitmap])
+		return -ENOMEM;
+
+	mapping->nr_bitmaps++;
+
+	return 0;
+}
+
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
 	if (mapping)
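
Note: arm_iommu_create_mapping() loses its order argument; IOVA space is now tracked in up to 'extensions' page-sized bitmaps, grown on demand by extend_iommu_mapping() under the mapping lock. A minimal sketch of a caller updated for the three-argument signature (the helper name, base and size values are illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

/* Hypothetical helper: reserve 128MB of IO virtual address space and
 * attach it to a device. Allocation granularity is now fixed at
 * PAGE_SIZE; only the first PAGE_SIZE worth of bitmap is allocated up
 * front, the rest lazily in __alloc_iova() via extend_iommu_mapping().
 */
static int example_attach(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret)
		arm_iommu_release_mapping(mapping);
	return ret;
}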
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index ef69152f9b52..c508f41a43bc 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -120,34 +120,51 @@ static const struct prot_bits pte_bits[] = {
 };
 
 static const struct prot_bits section_bits[] = {
-#ifndef CONFIG_ARM_LPAE
-	/* These are approximate */
+#ifdef CONFIG_ARM_LPAE
+	{
+		.mask	= PMD_SECT_USER,
+		.val	= PMD_SECT_USER,
+		.set	= "USR",
+	}, {
+		.mask	= PMD_SECT_RDONLY,
+		.val	= PMD_SECT_RDONLY,
+		.set	= "ro",
+		.clear	= "RW",
+#elif __LINUX_ARM_ARCH__ >= 6
 	{
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-		.val	= 0,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
 		.set	= "    ro",
 	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.val	= PMD_SECT_AP_WRITE,
 		.set	= "    RW",
 	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.val	= PMD_SECT_AP_READ,
 		.set	= "USR ro",
 	}, {
-		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.mask	= PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
 		.set	= "USR RW",
-#else
+#else /* ARMv4/ARMv5  */
+	/* These are approximate */
 	{
-		.mask	= PMD_SECT_USER,
-		.val	= PMD_SECT_USER,
-		.set	= "USR",
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= 0,
+		.set	= "    ro",
 	}, {
-		.mask	= PMD_SECT_RDONLY,
-		.val	= PMD_SECT_RDONLY,
-		.set	= "ro",
-		.clear	= "RW",
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_WRITE,
+		.set	= "    RW",
+	}, {
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_READ,
+		.set	= "USR ro",
+	}, {
+		.mask	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.val	= PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+		.set	= "USR RW",
 #endif
 	}, {
 		.mask	= PMD_SECT_XN,
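
Note: each prot_bits entry is a mask/value pair; a descriptor matches when (pmd & mask) == val, and the dumper prints .set on a match or .clear (when provided) otherwise. A sketch of that decode step, mirroring how dump.c consumes section_bits[] (the helper name is illustrative):

/* Print the permission string for one descriptor against a prot_bits
 * table: .set when the masked bits equal .val, else .clear if given.
 */
static void decode_prot(u64 pmd, const struct prot_bits *bits, size_t num)
{
	size_t i;

	for (i = 0; i < num; i++, bits++) {
		const char *s = (pmd & bits->mask) == bits->val ?
				bits->set : bits->clear;

		if (s)
			pr_cont(" %s", s);
	}
}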
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 804d61566a53..2a77ba8796ae 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -323,6 +323,8 @@ void __init arm_memblock_init(struct meminfo *mi,
 	if (mdesc->reserve)
 		mdesc->reserve();
 
+	early_init_fdt_scan_reserved_mem();
+
 	/*
 	 * reserve memory for DMA contigouos allocations,
 	 * must come from DMA area inside low memory
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a623cb3ad012..b68c6b22e1c8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -516,6 +516,16 @@ static void __init build_mem_type_table(void)
 	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 
 	/*
+	 * We don't use domains on ARMv6 (since this causes problems with
+	 * v6/v7 kernels), so we must use a separate memory type for user
+	 * r/o, kernel r/w to map the vectors page.
+	 */
+#ifndef CONFIG_ARM_LPAE
+	if (cpu_arch == CPU_ARCH_ARMv6)
+		vecs_pgprot |= L_PTE_MT_VECTORS;
+#endif
+
+	/*
 	 * ARMv6 and above have extended page tables.
 	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index e3c48a3fe063..ee1d80593958 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -112,13 +112,9 @@
  * 100x   1   0   1	r/o	no acc
  * 10x0   1   0   1	r/o	no acc
  * 1011   0   0   1	r/w	no acc
- * 110x   0   1   0	r/w	r/o
- * 11x0   0   1   0	r/w	r/o
- * 1111   0   1   1	r/w	r/w
- *
- * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
  * 110x   1   1   1	r/o	r/o
  * 11x0   1   1   1	r/o	r/o
+ * 1111   0   1   1	r/w	r/w
  */
 	.macro	armv6_mt_table pfx
 \pfx\()_mt_table:
@@ -137,7 +133,7 @@
 	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
 	.long	0x00						@ unused
 	.long	0x00						@ unused
-	.long	0x00						@ unused
+	.long	PTE_CACHEABLE | PTE_BUFFERABLE | PTE_EXT_APX	@ L_PTE_MT_VECTORS
 	.endm
 
 	.macro	armv6_set_pte_ext pfx
@@ -158,24 +154,21 @@
 
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
-#ifdef CONFIG_CPU_USE_DOMAINS
-	@ allow kernel read/write access to read-only user pages
 	tstne	r3, #PTE_EXT_APX
-	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-#endif
+
+	@ user read-only -> kernel read-only
+	bicne	r3, r3, #PTE_EXT_AP0
 
 	tst	r1, #L_PTE_XN
 	orrne	r3, r3, #PTE_EXT_XN
 
-	orr	r3, r3, r2
+	eor	r3, r3, r2
 
 	tst	r1, #L_PTE_YOUNG
 	tstne	r1, #L_PTE_PRESENT
 	moveq	r3, #0
-#ifndef CONFIG_CPU_USE_DOMAINS
 	tstne	r1, #L_PTE_NONE
 	movne	r3, #0
-#endif
 
 	str	r3, [r0]
 	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index bdd3be4be77a..1f52915f2b28 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -90,21 +90,14 @@ ENTRY(cpu_v7_set_pte_ext)
 
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
-#ifdef CONFIG_CPU_USE_DOMAINS
-	@ allow kernel read/write access to read-only user pages
-	tstne	r3, #PTE_EXT_APX
-	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
-#endif
 
 	tst	r1, #L_PTE_XN
 	orrne	r3, r3, #PTE_EXT_XN
 
 	tst	r1, #L_PTE_YOUNG
 	tstne	r1, #L_PTE_VALID
-#ifndef CONFIG_CPU_USE_DOMAINS
 	eorne	r1, r1, #L_PTE_NONE
 	tstne	r1, #L_PTE_NONE
-#endif
 	moveq	r3, #0
 
  ARM(	str	r3, [r0, #2048]! )
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 74f6033e76dd..195731d3813b 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -192,6 +192,7 @@ __v7_cr7mp_setup:
 	mov	r10, #(1 << 0)			@ Cache/TLB ops broadcasting
 	b	1f
 __v7_ca7mp_setup:
+__v7_ca12mp_setup:
 __v7_ca15mp_setup:
 	mov	r10, #0
 1:
@@ -484,6 +485,16 @@ __v7_ca7mp_proc_info:
 	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
 
 	/*
+	 * ARM Ltd. Cortex A12 processor.
+	 */
+	.type	__v7_ca12mp_proc_info, #object
+__v7_ca12mp_proc_info:
+	.long	0x410fc0d0
+	.long	0xff0ffff0
+	__v7_proc __v7_ca12mp_setup
+	.size	__v7_ca12mp_proc_info, . - __v7_ca12mp_proc_info
+
+	/*
 	 * ARM Ltd. Cortex A15 processor.
 	 */
 	.type	__v7_ca15mp_proc_info, #object
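
Note: a proc_info entry is matched against the CPU's main ID register (MIDR) using the value/mask pair at its start: 0x410fc0d0 encodes implementer 0x41 (ARM) and part number 0xc0d (Cortex-A12), while mask 0xff0ffff0 ignores the variant and revision fields. The lookup itself runs in assembly at boot; in C, the test amounts to the sketch below (the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Value/mask pair from __v7_ca12mp_proc_info above. */
static bool is_cortex_a12(uint32_t midr)
{
	return (midr & 0xff0ffff0u) == 0x410fc0d0u;
}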