path: root/arch/arm/mm
author		Paul Mundt <lethal@linux-sh.org>	2011-01-06 20:29:26 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-06 20:29:26 -0500
commit		5e93c6b4ecd78b1bab49bad1dc2f6ed7ec0115ee (patch)
tree		4f4e321a1ca0baf64d8af528080c71f93495a7d7 /arch/arm/mm
parent		98d27b8abf413a310df6676f7d2128ada1cccc08 (diff)
parent		3c0cb7c31c206aaedb967e44b98442bbeb17a6c4 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into rmobile-latest
Conflicts:
	arch/arm/mach-shmobile/Kconfig

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig              |  43
-rw-r--r--  arch/arm/mm/Makefile             |   4
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  |  37
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c       |  57
-rw-r--r--  arch/arm/mm/dma-mapping.c        |  35
-rw-r--r--  arch/arm/mm/fault-armv.c         |   2
-rw-r--r--  arch/arm/mm/fault.c              |   2
-rw-r--r--  arch/arm/mm/flush.c              |   7
-rw-r--r--  arch/arm/mm/highmem.c            |  87
-rw-r--r--  arch/arm/mm/idmap.c              |  67
-rw-r--r--  arch/arm/mm/ioremap.c            |   8
-rw-r--r--  arch/arm/mm/mm.h                 |   2
-rw-r--r--  arch/arm/mm/mmu.c                |  68
-rw-r--r--  arch/arm/mm/pgd.c                |  37
-rw-r--r--  arch/arm/mm/proc-macros.S        |  37
-rw-r--r--  arch/arm/mm/proc-v7.S            |  27
-rw-r--r--  arch/arm/mm/proc-xscale.S        |   4
17 files changed, 260 insertions(+), 264 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 8206842ddd9e..fcc1e628e050 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -382,6 +382,12 @@ config CPU_FEROCEON_OLD_ID
 	  for which the CPU ID is equal to the ARM926 ID.
 	  Relevant for Feroceon-1850 and early Feroceon-2850.
 
+# Marvell PJ4
+config CPU_PJ4
+	bool
+	select CPU_V7
+	select ARM_THUMBEE
+
 # ARMv6
 config CPU_V6
 	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE
@@ -599,6 +605,14 @@ config CPU_CP15_MPU
 	help
 	  Processor has the CP15 register, which has MPU related registers.
 
+config CPU_USE_DOMAINS
+	bool
+	depends on MMU
+	default y if !CPU_32v6K
+	help
+	  This option enables or disables the use of domain switching
+	  via the set_fs() function.
+
 #
 # CPU supports 36-bit I/O
 #
@@ -628,6 +642,33 @@ config ARM_THUMBEE
 	  Say Y here if you have a CPU with the ThumbEE extension and code to
 	  make use of it. Say N for code that can run on CPUs without ThumbEE.
 
+config SWP_EMULATE
+	bool "Emulate SWP/SWPB instructions"
+	depends on CPU_V7
+	select HAVE_PROC_CPU if PROC_FS
+	default y if SMP
+	help
+	  ARMv6 architecture deprecates use of the SWP/SWPB instructions.
+	  ARMv7 multiprocessing extensions introduce the ability to disable
+	  these instructions, triggering an undefined instruction exception
+	  when executed. Say Y here to enable software emulation of these
+	  instructions for userspace (not kernel) using LDREX/STREX.
+	  Also creates /proc/cpu/swp_emulation for statistics.
+
+	  In some older versions of glibc [<=2.8] SWP is used during futex
+	  trylock() operations with the assumption that the code will not
+	  be preempted. This invalid assumption may be more likely to fail
+	  with SWP emulation enabled, leading to deadlock of the user
+	  application.
+
+	  NOTE: when accessing uncached shared regions, LDREX/STREX rely
+	  on an external transaction monitoring block called a global
+	  monitor to maintain update atomicity. If your system does not
+	  implement a global monitor, this option can cause programs that
+	  perform SWP operations to uncached memory to deadlock.
+
+	  If unsure, say Y.
+
 config CPU_BIG_ENDIAN
 	bool "Build big-endian kernel"
 	depends on ARCH_SUPPORTS_BIG_ENDIAN
@@ -789,7 +830,7 @@ config CACHE_PL310
 
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
-	depends on (ARCH_DOVE || ARCH_MMP)
+	depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
 	default y
 	select OUTER_CACHE
 	help
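
A side note on the emulation strategy described in the SWP_EMULATE help text above: the deprecated atomic swap is replayed as a load-exclusive/store-exclusive retry loop. The sketch below is an illustration only, not the kernel's actual swp_emulate handler, and the function name is invented; it shows the shape of such a loop on ARMv6+:

	/* Emulate "swp": atomically exchange *ptr with 'new', returning the
	 * old value. Retries until the exclusive store succeeds.
	 */
	static unsigned long swp_emulated(unsigned long new, unsigned long *ptr)
	{
		unsigned long old;
		unsigned int status;

		__asm__ __volatile__(
		"1:	ldrex	%0, [%3]\n"	/* old = *ptr, take reservation */
		"	strex	%1, %2, [%3]\n"	/* try *ptr = new */
		"	teq	%1, #0\n"	/* lost the reservation? */
		"	bne	1b"		/* then retry */
		: "=&r" (old), "=&r" (status)
		: "r" (new), "r" (ptr)
		: "cc", "memory");

		return old;
	}
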
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index d63b6c413758..00d74a04af3a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -5,8 +5,8 @@
 obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
-obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
-				   pgd.o mmu.o vmregion.o
+obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
+				   mmap.o pgd.o mmu.o vmregion.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 6e77c042d8e9..e0b0e7a4ec68 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,13 +13,9 @@
  */
 
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
-#include "mm.h"
 
 /*
  * Low-level cache maintenance operations.
@@ -39,27 +35,30 @@
  * between which we don't want to be preempted.
  */
 
-static inline unsigned long l2_start_va(unsigned long paddr)
+static inline unsigned long l2_get_va(unsigned long paddr)
 {
 #ifdef CONFIG_HIGHMEM
 	/*
-	 * Let's do our own fixmap stuff in a minimal way here.
 	 * Because range ops can't be done on physical addresses,
 	 * we simply install a virtual mapping for it only for the
 	 * TLB lookup to occur, hence no need to flush the untouched
-	 * memory mapping. This is protected with the disabling of
-	 * interrupts by the caller.
+	 * memory mapping afterwards (note: a cache flush may happen
+	 * in some circumstances depending on the path taken in kunmap_atomic).
 	 */
-	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	local_flush_tlb_kernel_page(vaddr);
-	return vaddr + (paddr & ~PAGE_MASK);
+	void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
+	return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
 #else
 	return __phys_to_virt(paddr);
 #endif
 }
 
+static inline void l2_put_va(unsigned long vaddr)
+{
+#ifdef CONFIG_HIGHMEM
+	kunmap_atomic((void *)vaddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
@@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
 	 */
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-	raw_local_irq_save(flags);
-	va_start = l2_start_va(start);
+	va_start = l2_get_va(start);
 	va_end = va_start + (end - start);
+	raw_local_irq_save(flags);
 	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
 		"mcr p15, 1, %1, c15, c9, 5"
 		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
+	l2_put_va(va_start);
 }
 
 static inline void l2_clean_inv_pa(unsigned long addr)
@@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
 	 */
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-	raw_local_irq_save(flags);
-	va_start = l2_start_va(start);
+	va_start = l2_get_va(start);
 	va_end = va_start + (end - start);
+	raw_local_irq_save(flags);
 	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
 		"mcr p15, 1, %1, c15, c11, 5"
 		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
+	l2_put_va(va_start);
 }
 
 static inline void l2_inv_all(void)
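
The switch from hand-rolled fixmap PTEs to kmap_atomic_pfn() above generalises to any maintenance on a physical range that may live in highmem. A minimal sketch of the pattern follows; the helper and phys_op callback are hypothetical stand-ins for the mcr-based range ops, not part of this patch:

	#include <linux/highmem.h>

	static void maint_phys_range(unsigned long paddr, size_t len,
				     void (*phys_op)(unsigned long va, size_t len))
	{
		/* kmap_atomic_pfn() disables preemption, so the mapping and
		 * the per-CPU kmap slot stay stable across phys_op().
		 */
		void *base = kmap_atomic_pfn(paddr >> PAGE_SHIFT);

		phys_op((unsigned long)base + (paddr & ~PAGE_MASK), len);
		kunmap_atomic(base);	/* range must not cross a page boundary */
	}
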
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index c3154928bccd..5a32020471e3 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,14 +17,10 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
 
 #define CR_L2	(1 << 26)
 
@@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
 	dsb();
 }
 
+static inline void l2_unmap_va(unsigned long va)
+{
 #ifdef CONFIG_HIGHMEM
-#define l2_map_save_flags(x)		raw_local_save_flags(x)
-#define l2_map_restore_flags(x)		raw_local_irq_restore(x)
-#else
-#define l2_map_save_flags(x)		((x) = 0)
-#define l2_map_restore_flags(x)		((void)(x))
+	if (va != -1)
+		kunmap_atomic((void *)va);
 #endif
+}
 
-static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
-				      unsigned long flags)
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
 {
 #ifdef CONFIG_HIGHMEM
 	unsigned long va = prev_va & PAGE_MASK;
@@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 		/*
 		 * Switching to a new page.  Because cache ops are
 		 * using virtual addresses only, we must put a mapping
-		 * in place for it.  We also enable interrupts for a
-		 * short while and disable them again to protect this
-		 * mapping.
+		 * in place for it.
 		 */
-		unsigned long idx;
-		raw_local_irq_restore(flags);
-		idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-		va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		raw_local_irq_restore(flags | PSR_I_BIT);
-		set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
-		local_flush_tlb_kernel_page(va);
+		l2_unmap_va(prev_va);
+		va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
 	}
 	return va + (pa_offset >> (32 - PAGE_SHIFT));
 #else
@@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_inv_all();
@@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	}
 
 	vaddr = -1;  /* to force the first mapping */
-	l2_map_save_flags(flags);
 
 	/*
 	 * Clean and invalidate partial first cache line.
 	 */
 	if (start & (CACHE_LINE_SIZE - 1)) {
-		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
 	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
@@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	 * Clean and invalidate partial last cache line.
 	 */
 	if (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 	}
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 	dsb();
 }
 
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 	vaddr = -1;  /* to force the first mapping */
-	l2_map_save_flags(flags);
 
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 	dsb();
 }
@@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
 
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_flush_all();
@@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 	}
 
 	vaddr = -1;  /* to force the first mapping */
-	l2_map_save_flags(flags);
 
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 	dsb();
 }
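
Worth noting in the loops above is the prev_va threading: -1 marks "nothing mapped yet", and l2_map_va() only remaps when the physical address crosses into a new page, unmapping the old page first. A condensed usage sketch, simplified from the functions in this file:

	unsigned long vaddr = -1;	/* force the first mapping */

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);	/* kmaps once per page */
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}
	l2_unmap_va(vaddr);	/* no-op if the loop never mapped anything */
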
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ac6a36142fcd..6b48e0a3d7aa 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -311,7 +312,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	addr = page_address(page);
 
 	if (addr)
-		*handle = page_to_dma(dev, page);
+		*handle = pfn_to_dma(dev, page_to_pfn(page));
 
 	return addr;
 }
@@ -406,7 +407,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	if (!arch_is_coherent())
 		__dma_free_remap(cpu_addr, size);
 
-	__dma_free_buffer(dma_to_page(dev, handle), size);
+	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
@@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			op(vaddr, len, dir);
 			kunmap_high(page);
 		} else if (cache_is_vipt()) {
-			pte_t saved_pte;
-			vaddr = kmap_high_l1_vipt(page, &saved_pte);
+			/* unmapped pages might still be cached */
+			vaddr = kmap_atomic(page);
 			op(vaddr + offset, len, dir);
-			kunmap_high_l1_vipt(page, saved_pte);
+			kunmap_atomic(vaddr);
 		}
 	} else {
 		vaddr = page_address(page) + offset;
@@ -554,17 +555,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i, j;
 
+	BUG_ON(!valid_dma_direction(dir));
+
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
 						s->length, dir);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
+	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
  bad_mapping:
 	for_each_sg(sg, s, i, j)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 	return 0;
 }
 EXPORT_SYMBOL(dma_map_sg);
@@ -585,8 +589,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i;
 
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+
 	for_each_sg(sg, s, nents, i)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 }
 EXPORT_SYMBOL(dma_unmap_sg);
 
@@ -611,6 +617,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		__dma_page_dev_to_cpu(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
@@ -635,5 +643,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		__dma_page_cpu_to_dev(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES	4096
+
+static int __init dma_debug_do_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	return 0;
+}
+fs_initcall(dma_debug_do_init);
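
With the debug_dma_* hooks and the fs_initcall above, CONFIG_DMA_API_DEBUG can pair every scatterlist map with its unmap. A hypothetical driver-side sketch of the calls now being tracked (device, function, and scatterlist names invented):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int start_tx(struct device *dev, struct scatterlist *sgl, int count)
	{
		int nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);

		if (!nents)
			return -ENOMEM;
		/* ... program the controller via sg_dma_address()/sg_dma_len() ... */
		dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);	/* checked against the map */
		return 0;
	}
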
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 83e59f870426..01210dba0221 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -26,7 +26,7 @@
 
 #include "mm.h"
 
-static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
+static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 #if __LINUX_ARM_ARCH__ < 6
 /*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 1e21e125fe3a..f10f9bac2206 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -108,7 +108,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 
 		pte = pte_offset_map(pmd, addr);
 		printk(", *pte=%08lx", pte_val(*pte));
-		printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
+		printk(", *ppte=%08lx", pte_val(pte[PTE_HWTABLE_PTRS]));
 		pte_unmap(pte);
 	} while(0);
 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 391ffae75098..c29f2839f1d2 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 		kunmap_high(page);
 	} else if (cache_is_vipt()) {
-		pte_t saved_pte;
-		addr = kmap_high_l1_vipt(page, &saved_pte);
+		/* unmapped pages might still be cached */
+		addr = kmap_atomic(page);
 		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-		kunmap_high_l1_vipt(page, saved_pte);
+		kunmap_atomic(addr);
 	}
 }
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c435fd9e1da9..807c0573abbe 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-	unsigned int idx, cpu;
-	int *depth;
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	if (!in_interrupt())
-		preempt_disable();
-
-	cpu = smp_processor_id();
-	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	raw_local_irq_save(flags);
-	(*depth)++;
-	if (pte_val(*ptep) == pte_val(pte)) {
-		*saved_pte = pte;
-	} else {
-		*saved_pte = *ptep;
-		set_pte_ext(ptep, pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	BUG_ON(pte_val(*ptep) != pte_val(pte));
-	BUG_ON(*depth <= 0);
-
-	raw_local_irq_save(flags);
-	(*depth)--;
-	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-		set_pte_ext(ptep, saved_pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	if (!in_interrupt())
-		preempt_enable();
-}
-
-#endif	/* CONFIG_CPU_CACHE_VIPT */
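
The reentrancy machinery deleted above is subsumed by the stack-based atomic kmaps: since the kmap_atomic() rework, atomic mappings nest per-CPU in LIFO order, including from interrupt context. An illustrative sketch, not taken from this patch:

	#include <linux/highmem.h>

	static void nested_kmap_example(struct page *a, struct page *b)
	{
		void *va = kmap_atomic(a);
		void *vb = kmap_atomic(b);	/* nests on the per-CPU slot stack */

		/* ... cache maintenance through va and vb ... */

		kunmap_atomic(vb);		/* must pop in reverse order */
		kunmap_atomic(va);
	}
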
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
new file mode 100644
index 000000000000..57299446f787
--- /dev/null
+++ b/arch/arm/mm/idmap.c
@@ -0,0 +1,67 @@
+#include <linux/kernel.h>
+
+#include <asm/cputype.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+
+static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end,
+	unsigned long prot)
+{
+	pmd_t *pmd = pmd_offset(pgd, addr);
+
+	addr = (addr & PMD_MASK) | prot;
+	pmd[0] = __pmd(addr);
+	addr += SECTION_SIZE;
+	pmd[1] = __pmd(addr);
+	flush_pmd_entry(pmd);
+}
+
+void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+	unsigned long prot, next;
+
+	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
+	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
+		prot |= PMD_BIT4;
+
+	pgd += pgd_index(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		idmap_add_pmd(pgd, addr, next, prot);
+	} while (pgd++, addr = next, addr != end);
+}
+
+#ifdef CONFIG_SMP
+static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+	pmd_t *pmd = pmd_offset(pgd, addr);
+	pmd_clear(pmd);
+}
+
+void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+
+	pgd += pgd_index(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		idmap_del_pmd(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+}
+#endif
+
+/*
+ * In order to soft-boot, we need to insert a 1:1 mapping in place of
+ * the user-mode pages.  This will then ensure that we have predictable
+ * results when turning the mmu off
+ */
+void setup_mm_for_reboot(char mode)
+{
+	/*
+	 * We need to access to user-mode page tables here. For kernel threads
+	 * we don't have any user-mode mappings so we use the context that we
+	 * "borrowed".
+	 */
+	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
+	local_flush_tlb_all();
+}
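
The pmd[0]/pmd[1] pair in idmap_add_pmd() reflects the classic ARM short-descriptor layout, where one pgd entry spans two 1MB sections. A stand-alone illustration of the arithmetic, as a user-space demo with assumed constants rather than kernel code:

	#include <stdint.h>
	#include <stdio.h>

	#define SECTION_SIZE	0x00100000u			/* 1MB section */
	#define PMD_MASK	(~(2 * SECTION_SIZE - 1))	/* 2MB per pgd entry */

	int main(void)
	{
		uint32_t prot = 0xc02;		/* example section-type + AP bits */
		uint32_t addr = 0x00300000;
		uint32_t pmd0 = (addr & PMD_MASK) | prot;	/* as in idmap_add_pmd() */
		uint32_t pmd1 = pmd0 + SECTION_SIZE;

		/* each section maps virtual == physical, hence "identity" */
		printf("pmd[0]=%08x -> 1:1 at %08x\n", pmd0, pmd0 & ~(SECTION_SIZE - 1));
		printf("pmd[1]=%08x -> 1:1 at %08x\n", pmd1, pmd1 & ~(SECTION_SIZE - 1));
		return 0;
	}
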
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 55c17a6fb22f..ab506272b2d3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,12 +204,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
 	 */
-	if (pfn_valid(pfn)) {
-		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory.  This leads\n"
-		       "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
-		       "will fail in the next kernel release.  Please fix your driver.\n");
-		WARN_ON(1);
-	}
+	if (WARN_ON(pfn_valid(pfn)))
+		return NULL;
 
 	type = get_mem_type(mtype);
 	if (!type)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6630620380a4..36960df5fb76 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -16,7 +16,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 }
 
 struct mem_type {
-	unsigned int prot_pte;
+	pteval_t prot_pte;
 	unsigned int prot_l1;
 	unsigned int prot_sect;
 	unsigned int domain;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 72ad3e1f56cf..3c67e92f7d59 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -24,6 +24,7 @@
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
+#include <asm/traps.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -62,7 +63,7 @@ struct cachepolicy {
 	const char	policy[16];
 	unsigned int	cr_mask;
 	unsigned int	pmd;
-	unsigned int	pte;
+	pteval_t	pte;
 };
 
 static struct cachepolicy cache_policies[] __initdata = {
@@ -190,7 +191,7 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
-#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
+#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
 static struct mem_type mem_types[] = {
@@ -235,19 +236,18 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_LOW_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_EXEC,
+				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_USER | L_PTE_EXEC,
+				L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE | L_PTE_EXEC,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
@@ -258,21 +258,20 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_NONCACHED] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+				L_PTE_MT_BUFFERABLE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DTCM] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE,
+				L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_ITCM] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_WRITE | L_PTE_EXEC,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -479,7 +478,7 @@ static void __init build_mem_type_table(void)
 
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);
+				 L_PTE_DIRTY | kern_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -535,7 +534,7 @@ static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned l
 {
 	if (pmd_none(*pmd)) {
 		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
-		__pmd_populate(pmd, __pa(pte) | prot);
+		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
 	return pte_offset_kernel(pmd, addr);
@@ -553,7 +552,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 }
 
 static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
-				      unsigned long end, unsigned long phys,
+				      unsigned long end, phys_addr_t phys,
 				      const struct mem_type *type)
 {
 	pmd_t *pmd = pmd_offset(pgd, addr);
@@ -588,7 +587,8 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
 static void __init create_36bit_mapping(struct map_desc *md,
 					const struct mem_type *type)
 {
-	unsigned long phys, addr, length, end;
+	unsigned long addr, length, end;
+	phys_addr_t phys;
 	pgd_t *pgd;
 
 	addr = md->virtual;
@@ -914,12 +914,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
-	void *vectors;
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors_page = early_alloc(PAGE_SIZE);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -959,7 +958,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000).  If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
@@ -1044,38 +1043,3 @@ void __init paging_init(struct machine_desc *mdesc)
 	empty_zero_page = virt_to_page(zero_page);
 	__flush_dcache_page(NULL, empty_zero_page);
 }
-
-/*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages.  This will then ensure that we have predictable
- * results when turning the mmu off
- */
-void setup_mm_for_reboot(char mode)
-{
-	unsigned long base_pmdval;
-	pgd_t *pgd;
-	int i;
-
-	/*
-	 * We need to access to user-mode page tables here. For kernel threads
-	 * we don't have any user-mode mappings so we use the context that we
-	 * "borrowed".
-	 */
-	pgd = current->active_mm->pgd;
-
-	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
-	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
-		base_pmdval |= PMD_BIT4;
-
-	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
-		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
-		pmd_t *pmd;
-
-		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
-		pmd[0] = __pmd(pmdval);
-		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
-		flush_pmd_entry(pmd);
-	}
-
-	local_flush_tlb_all();
-}
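
A recurring theme in the mem_types[] hunks above is the inversion of the permission-bit sense: L_PTE_WRITE/L_PTE_EXEC granted access when set, while the new L_PTE_RDONLY/L_PTE_XN revoke it when set, matching the hardware APX/XN encoding. A stand-alone demo of the new convention, with bit positions assumed for illustration:

	#include <stdio.h>

	#define L_PTE_RDONLY	(1 << 7)	/* set = read-only */
	#define L_PTE_XN	(1 << 9)	/* set = no-execute */

	static void describe(unsigned int pte)
	{
		printf("%s, %s\n",
		       (pte & L_PTE_RDONLY) ? "read-only" : "writable",
		       (pte & L_PTE_XN) ? "no-execute" : "executable");
	}

	int main(void)
	{
		describe(0);			/* MT_MEMORY-style: writable, executable */
		describe(L_PTE_RDONLY);		/* MT_LOW_VECTORS-style: r/o, executable */
		describe(L_PTE_XN);		/* MT_MEMORY_DTCM-style: writable, XN */
		return 0;
	}
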
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 69bbfc6645a6..93292a18cf77 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -17,12 +17,10 @@
 
 #include "mm.h"
 
-#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
-
 /*
  * need to get a 16k page for level 1
  */
-pgd_t *get_pgd_slow(struct mm_struct *mm)
+pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *new_pgd, *init_pgd;
 	pmd_t *new_pmd, *init_pmd;
@@ -32,14 +30,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 	if (!new_pgd)
 		goto no_pgd;
 
-	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
+	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
 
 	/*
 	 * Copy over the kernel and IO PGD entries
 	 */
 	init_pgd = pgd_offset_k(0);
-	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
-	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
+	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 
 	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
 
@@ -73,28 +71,29 @@ no_pgd:
 	return NULL;
 }
 
-void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd_base)
 {
+	pgd_t *pgd;
 	pmd_t *pmd;
 	pgtable_t pte;
 
-	if (!pgd)
+	if (!pgd_base)
 		return;
 
-	/* pgd is always present and good */
-	pmd = pmd_off(pgd, 0);
-	if (pmd_none(*pmd))
-		goto free;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		goto free;
-	}
+	pgd = pgd_base + pgd_index(0);
+	if (pgd_none_or_clear_bad(pgd))
+		goto no_pgd;
+
+	pmd = pmd_offset(pgd, 0);
+	if (pmd_none_or_clear_bad(pmd))
+		goto no_pmd;
 
 	pte = pmd_pgtable(*pmd);
 	pmd_clear(pmd);
 	pte_free(mm, pte);
+no_pmd:
+	pgd_clear(pgd);
 	pmd_free(mm, pmd);
-free:
-	free_pages((unsigned long) pgd, 2);
+no_pgd:
+	free_pages((unsigned long) pgd_base, 2);
 }
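
For reference, the order-2 pages being freed above follow from the size of the ARM first-level translation table, 4096 hardware entries of 4 bytes each:

	4096 entries * 4 bytes = 16384 bytes = four 4KB pages = order-2 allocation
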
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index b795afd0a2c6..e32fa499194c 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -91,7 +91,7 @@
 #if L_PTE_SHARED != PTE_EXT_SHARED
 #error PTE shared bit mismatch
 #endif
-#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
+#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
      L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
 #error Invalid Linux PTE bit settings
 #endif
@@ -109,6 +109,10 @@
  *  110x   0   1   0	r/w	r/o
  *  11x0   0   1   0	r/w	r/o
  *  1111   0   1   1	r/w	r/w
+ *
+ * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
+ *  110x   1   1   1	r/o	r/o
+ *  11x0   1   1   1	r/o	r/o
  */
 	.macro	armv6_mt_table pfx
 \pfx\()_mt_table:
@@ -131,7 +135,7 @@
 	.endm
 
 	.macro	armv6_set_pte_ext pfx
-	str	r1, [r0], #-2048		@ linux version
+	str	r1, [r0], #2048			@ linux version
 
 	bic	r3, r1, #0x000003fc
 	bic	r3, r3, #PTE_TYPE_MASK
@@ -142,17 +146,20 @@
 	and	r2, r1, #L_PTE_MT_MASK
 	ldr	r2, [ip, r2]
 
-	tst	r1, #L_PTE_WRITE
-	tstne	r1, #L_PTE_DIRTY
-	orreq	r3, r3, #PTE_EXT_APX
+	eor	r1, r1, #L_PTE_DIRTY
+	tst	r1, #L_PTE_DIRTY|L_PTE_RDONLY
+	orrne	r3, r3, #PTE_EXT_APX
 
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+	@ allow kernel read/write access to read-only user pages
 	tstne	r3, #PTE_EXT_APX
 	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
 
-	tst	r1, #L_PTE_EXEC
-	orreq	r3, r3, #PTE_EXT_XN
+	tst	r1, #L_PTE_XN
+	orrne	r3, r3, #PTE_EXT_XN
 
 	orr	r3, r3, r2
 
@@ -180,9 +187,9 @@
  *  1111   0xff	r/w	r/w
  */
 	.macro	armv3_set_pte_ext wc_disable=1
-	str	r1, [r0], #-2048		@ linux version
+	str	r1, [r0], #2048			@ linux version
 
-	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY
 
 	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
 	bic	r2, r2, #PTE_TYPE_MASK
@@ -191,7 +198,7 @@
 	tst	r3, #L_PTE_USER			@ user?
 	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW
 
-	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
+	tst	r3, #L_PTE_RDONLY | L_PTE_DIRTY	@ write and dirty?
 	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW
 
 	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
@@ -203,7 +210,7 @@
 	bicne	r2, r2, #PTE_BUFFERABLE
 #endif
 	.endif
-	str	r2, [r0]			@ hardware version
+	str	r2, [r0]		@ hardware version
 	.endm
 
 
@@ -223,9 +230,9 @@
  *  1111   11	r/w	r/w
  */
 	.macro	xscale_set_pte_ext_prologue
-	str	r1, [r0], #-2048		@ linux version
+	str	r1, [r0]			@ linux version
 
-	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
+	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY
 
 	bic	r2, r1, #PTE_SMALL_AP_MASK	@ keep C, B bits
 	orr	r2, r2, #PTE_TYPE_EXT		@ extended page
@@ -233,7 +240,7 @@
 	tst	r3, #L_PTE_USER			@ user?
 	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w
 
-	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ write and dirty?
+	tst	r3, #L_PTE_RDONLY | L_PTE_DIRTY	@ write and dirty?
 	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
 						@ combined with user -> user r/w
 	.endm
@@ -242,7 +249,7 @@
 	tst	r3, #L_PTE_PRESENT | L_PTE_YOUNG	@ present and young?
 	movne	r2, #0				@ no -> fault
 
-	str	r2, [r0]			@ hardware version
+	str	r2, [r0, #2048]!		@ hardware version
 	mov	ip, #0
 	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
 	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
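
The #2048 offsets introduced above encode the series' reversed page-table layout: the Linux view of a pte page now sits in the first half of the 4KB page and the hardware tables the MMU walks in the second half (the same offset fault.c's new pte[PTE_HWTABLE_PTRS] print relies on). A sketch under that assumption:

	/*   pte page (4KB)
	 *   +0    : Linux pt 0      +1024 : Linux pt 1
	 *   +2048 : h/w pt 0        +3072 : h/w pt 1
	 */
	#define PTE_HWTABLE_BYTES	2048

	static unsigned int *hw_pte(unsigned int *linux_pte)
	{
		/* the hardware entry lives 2048 bytes above its Linux twin */
		return (unsigned int *)((char *)linux_pte + PTE_HWTABLE_BYTES);
	}
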
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 9b9ff5d949fd..b49fab21517c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -124,15 +124,13 @@ ENDPROC(cpu_v7_switch_mm)
  *	Set a level 2 translation table entry.
  *
  *	- ptep  - pointer to level 2 translation table entry
- *		  (hardware version is stored at -1024 bytes)
+ *		  (hardware version is stored at +2048 bytes)
  *	- pte   - PTE value to store
  *	- ext	- value for extended PTE bits
  */
 ENTRY(cpu_v7_set_pte_ext)
 #ifdef CONFIG_MMU
- ARM(	str	r1, [r0], #-2048	)	@ linux version
- THUMB(	str	r1, [r0]	)	@ linux version
- THUMB(	sub	r0, r0, #2048	)
+	str	r1, [r0]			@ linux version
 
 	bic	r3, r1, #0x000003f0
 	bic	r3, r3, #PTE_TYPE_MASK
@@ -142,23 +140,26 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r1, #1 << 4
 	orrne	r3, r3, #PTE_EXT_TEX(1)
 
-	tst	r1, #L_PTE_WRITE
-	tstne	r1, #L_PTE_DIRTY
-	orreq	r3, r3, #PTE_EXT_APX
+	eor	r1, r1, #L_PTE_DIRTY
+	tst	r1, #L_PTE_RDONLY | L_PTE_DIRTY
+	orrne	r3, r3, #PTE_EXT_APX
 
 	tst	r1, #L_PTE_USER
 	orrne	r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+	@ allow kernel read/write access to read-only user pages
 	tstne	r3, #PTE_EXT_APX
 	bicne	r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
 
-	tst	r1, #L_PTE_EXEC
-	orreq	r3, r3, #PTE_EXT_XN
+	tst	r1, #L_PTE_XN
+	orrne	r3, r3, #PTE_EXT_XN
 
 	tst	r1, #L_PTE_YOUNG
 	tstne	r1, #L_PTE_PRESENT
 	moveq	r3, #0
 
-	str	r3, [r0]
+	str	r3, [r0, #2048]!
 	mcr	p15, 0, r0, c7, c10, 1		@ flush_pte
 #endif
 	mov	pc, lr
@@ -273,8 +274,6 @@ __v7_setup:
 	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
-	mov	r10, #0x1f			@ domains 0, 1 = manager
-	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
 	/*
 	 * Memory region attributes with SCTLR.TRE=1
 	 *
@@ -313,6 +312,10 @@ __v7_setup:
 #ifdef CONFIG_CPU_ENDIAN_BE8
 	orr	r6, r6, #1 << 25		@ big-endian page tables
 #endif
+#ifdef CONFIG_SWP_EMULATE
+	orr	r5, r5, #(1 << 10)		@ set SW bit in "clear"
+	bic	r6, r6, #(1 << 10)		@ clear it in "mmuset"
+#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
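
Two things in cpu_v7_set_pte_ext deserve a gloss. The eor/tst pair computes "read-only or still clean" in one test: APX must be set not only for read-only pages but also for clean ones, so the first write faults and the kernel can mark the page dirty. The same predicate in C, with bit values assumed for illustration:

	#define L_PTE_DIRTY	(1 << 6)
	#define L_PTE_RDONLY	(1 << 7)

	static int wants_apx(unsigned int pte)
	{
		pte ^= L_PTE_DIRTY;	/* invert: a set bit now means "clean" */
		return (pte & (L_PTE_DIRTY | L_PTE_RDONLY)) != 0;
	}

And in __v7_setup, r5/r6 hold the SCTLR "clear"/"set" masks; putting bit 10 (SCTLR.SW) into the clear mask disables the SWP instruction, so executing it traps as undefined and the SWP_EMULATE handler can take over.
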
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 523408c0bb38..5a37c5e45c41 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -500,8 +500,8 @@ ENTRY(cpu_xscale_set_pte_ext)
 	@
 	@ Erratum 40: must set memory to write-through for user read-only pages
 	@
-	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2)
-	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER
+	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
+	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY
 
 	moveq	r1, #L_PTE_MT_WRITETHROUGH
 	and	r1, r1, #L_PTE_MT_MASK