Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Kconfig                          |   8
 arch/arm/mm/Makefile                         |   2
 arch/arm/mm/abort-ev7.S                      |   1
 arch/arm/mm/abort-nommu.S                    |   1
 arch/arm/mm/alignment.c                      |   2
 arch/arm/mm/cache-feroceon-l2.c              |  42
 arch/arm/mm/cache-l2x0.c                     |   2
 arch/arm/mm/cache-v7.S                       |  10
 arch/arm/mm/cache-xsc3l2.c                   |   3
 arch/arm/mm/copypage-v4mc.c                  |   2
 arch/arm/mm/copypage-v6.c                    |   1
 arch/arm/mm/copypage-xscale.c                |   2
 arch/arm/mm/{consistent.c => dma-mapping.c}  | 104
 arch/arm/mm/extable.c                        |   2
 arch/arm/mm/fault-armv.c                     |  13
 arch/arm/mm/fault.c                          |   7
 arch/arm/mm/flush.c                          |   1
 arch/arm/mm/init.c                           | 193
 arch/arm/mm/iomap.c                          |   3
 arch/arm/mm/ioremap.c                        |  15
 arch/arm/mm/mm.h                             |   3
 arch/arm/mm/mmap.c                           |   6
 arch/arm/mm/mmu.c                            | 108
 arch/arm/mm/nommu.c                          |  18
 arch/arm/mm/proc-arm1020.S                   |  26
 arch/arm/mm/proc-arm1020e.S                  |  26
 arch/arm/mm/proc-arm1022.S                   |  26
 arch/arm/mm/proc-arm1026.S                   |  26
 arch/arm/mm/proc-arm6_7.S                    |  27
 arch/arm/mm/proc-arm720.S                    |  25
 arch/arm/mm/proc-arm740.S                    |   2
 arch/arm/mm/proc-arm7tdmi.S                  |   2
 arch/arm/mm/proc-arm920.S                    |  28
 arch/arm/mm/proc-arm922.S                    |  26
 arch/arm/mm/proc-arm925.S                    |  26
 arch/arm/mm/proc-arm926.S                    |  26
 arch/arm/mm/proc-arm940.S                    |   2
 arch/arm/mm/proc-arm946.S                    |   2
 arch/arm/mm/proc-arm9tdmi.S                  |   2
 arch/arm/mm/proc-feroceon.S                  |  33
 arch/arm/mm/proc-macros.S                    | 170
 arch/arm/mm/proc-sa110.S                     |  21
 arch/arm/mm/proc-sa1100.S                    |  21
 arch/arm/mm/proc-v6.S                        |  42
 arch/arm/mm/proc-v7.S                        |  39
 arch/arm/mm/proc-xsc3.S                      |  56
 arch/arm/mm/proc-xscale.S                    |  76
 arch/arm/mm/tlb-v7.S                         |   2
 48 files changed, 690 insertions(+), 591 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ed15f876c725..330814d1ee25 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -735,6 +735,14 @@ config CACHE_FEROCEON_L2
 	help
 	  This option enables the Feroceon L2 cache controller.
 
+config CACHE_FEROCEON_L2_WRITETHROUGH
+	bool "Force Feroceon L2 cache write through"
+	depends on CACHE_FEROCEON_L2
+	default n
+	help
+	  Say Y here to use the Feroceon L2 cache in writethrough mode.
+	  Unless you specifically require this, say N for writeback mode.
+
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176
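The new Kconfig symbol only records a preference; a hedged sketch of how C code in this series might gate on it (the helper name is hypothetical, not taken from this diff):

    /* Sketch: compile-time gate on the new option. l2_wt_override()
     * is an illustrative name, not a real kernel API. */
    #ifdef CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH
    #define l2_wt_override()	1	/* force write-through behaviour */
    #else
    #define l2_wt_override()	0	/* default: write-back */
    #endif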
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 2e27a8c8372b..480f78a3611a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux arm-specific parts of the memory manager.
 #
 
-obj-y				:= consistent.o extable.o fault.o init.o \
+obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index eb90bce38e14..2e6dc040c654 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -30,3 +30,4 @@ ENTRY(v7_early_abort)
  * New designs should not need to patch up faults.
  */
 	mov	pc, lr
+ENDPROC(v7_early_abort)
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S
index a7cc7f9ee45d..625e580945b5 100644
--- a/arch/arm/mm/abort-nommu.S
+++ b/arch/arm/mm/abort-nommu.S
@@ -17,3 +17,4 @@ ENTRY(nommu_early_abort)
 	mov	r0, #0			@ clear r0, r1 (no FSR/FAR)
 	mov	r1, #0
 	mov	pc, lr
+ENDPROC(nommu_early_abort)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index e162cca5917f..133e65d166b3 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,8 +17,8 @@
 #include <linux/string.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
+#include <linux/uaccess.h>
 
-#include <asm/uaccess.h>
 #include <asm/unaligned.h>
 
 #include "fault.h"
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 7b5a25d81576..13cdae8b0d44 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -48,11 +48,12 @@ static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
 	 * L2 is PIPT and range operations only do a TLB lookup on
 	 * the start address.
 	 */
-	BUG_ON((start ^ end) & ~(PAGE_SIZE - 1));
+	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
 	raw_local_irq_save(flags);
-	__asm__("mcr p15, 1, %0, c15, c9, 4" : : "r" (start));
-	__asm__("mcr p15, 1, %0, c15, c9, 5" : : "r" (end));
+	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
+		"mcr p15, 1, %1, c15, c9, 5"
+		: : "r" (start), "r" (end));
 	raw_local_irq_restore(flags);
 }
 
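Both the old and the new BUG_ON assert the same invariant - start and end must lie in the same page, since the L2 range operations translate only the start address - but the shifted form states it more directly. A standalone illustration of the predicate:

    #include <assert.h>

    #define PAGE_SHIFT 12

    /* Nonzero iff a and b fall in different pages: XOR keeps only the
     * differing bits; shifting out the in-page offset leaves any
     * difference in the page number. */
    static int different_pages(unsigned long a, unsigned long b)
    {
    	return ((a ^ b) >> PAGE_SHIFT) != 0;
    }

    int main(void)
    {
    	assert(!different_pages(0x1000, 0x1fff));	/* same 4K page */
    	assert(different_pages(0x1fff, 0x2000));	/* page crossing */
    	return 0;
    }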
@@ -80,11 +81,12 @@ static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
 	 * L2 is PIPT and range operations only do a TLB lookup on
 	 * the start address.
 	 */
-	BUG_ON((start ^ end) & ~(PAGE_SIZE - 1));
+	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
 	raw_local_irq_save(flags);
-	__asm__("mcr p15, 1, %0, c15, c11, 4" : : "r" (start));
-	__asm__("mcr p15, 1, %0, c15, c11, 5" : : "r" (end));
+	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
+		"mcr p15, 1, %1, c15, c11, 5"
+		: : "r" (start), "r" (end));
 	raw_local_irq_restore(flags);
 }
 
@@ -205,7 +207,7 @@ static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
  * time.  These are necessary because the L2 cache can only be enabled
  * or disabled while the L1 Dcache and Icache are both disabled.
  */
-static void __init invalidate_and_disable_dcache(void)
+static int __init flush_and_disable_dcache(void)
 {
 	u32 cr;
 
@@ -217,7 +219,9 @@ static void __init invalidate_and_disable_dcache(void)
 		flush_cache_all();
 		set_cr(cr & ~CR_C);
 		raw_local_irq_restore(flags);
+		return 1;
 	}
+	return 0;
 }
 
 static void __init enable_dcache(void)
@@ -225,18 +229,17 @@ static void __init enable_dcache(void)
 	u32 cr;
 
 	cr = get_cr();
-	if (!(cr & CR_C))
-		set_cr(cr | CR_C);
+	set_cr(cr | CR_C);
 }
 
 static void __init __invalidate_icache(void)
 {
 	int dummy;
 
-	__asm__ __volatile__("mcr p15, 0, %0, c7, c5, 0\n" : "=r" (dummy));
+	__asm__ __volatile__("mcr p15, 0, %0, c7, c5, 0" : "=r" (dummy));
 }
 
-static void __init invalidate_and_disable_icache(void)
+static int __init invalidate_and_disable_icache(void)
 {
 	u32 cr;
 
@@ -244,7 +247,9 @@ static void __init invalidate_and_disable_icache(void)
 	if (cr & CR_I) {
 		set_cr(cr & ~CR_I);
 		__invalidate_icache();
+		return 1;
 	}
+	return 0;
 }
 
 static void __init enable_icache(void)
@@ -252,8 +257,7 @@ static void __init enable_icache(void)
 	u32 cr;
 
 	cr = get_cr();
-	if (!(cr & CR_I))
-		set_cr(cr | CR_I);
+	set_cr(cr | CR_I);
 }
 
 static inline u32 read_extra_features(void)
@@ -291,13 +295,17 @@ static void __init enable_l2(void)
 
 	u = read_extra_features();
 	if (!(u & 0x00400000)) {
+		int i, d;
+
 		printk(KERN_INFO "Feroceon L2: Enabling L2\n");
 
-		invalidate_and_disable_dcache();
-		invalidate_and_disable_icache();
+		d = flush_and_disable_dcache();
+		i = invalidate_and_disable_icache();
 		write_extra_features(u | 0x00400000);
-		enable_icache();
-		enable_dcache();
+		if (i)
+			enable_icache();
+		if (d)
+			enable_dcache();
 	}
 }
 
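The new int return values let enable_l2() re-enable only the caches that were actually on beforehand; the old code switched both caches on unconditionally, which is wrong when a cache had been deliberately left disabled. A minimal sketch of the save/restore pattern (plain C, not kernel API):

    /* Disable a control-register bit and report whether it was set,
     * so the caller can restore exactly the previous state. */
    static int disable_bit(unsigned int *cr, unsigned int bit)
    {
    	if (*cr & bit) {
    		*cr &= ~bit;
    		return 1;	/* was enabled: caller should restore */
    	}
    	return 0;		/* was already off: leave it off */
    }

    static void restore_bit(unsigned int *cr, unsigned int bit, int was_on)
    {
    	if (was_on)
    		*cr |= bit;
    }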
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 76b800a95191..b480f1d3591f 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -18,9 +18,9 @@
  */
 #include <linux/init.h>
 #include <linux/spinlock.h>
+#include <linux/io.h>
 
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #define CACHE_LINE_SIZE		32
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 35ffc4d95997..d19c2bec2b1f 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -66,6 +66,7 @@ finished:
 	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
 	isb
 	mov	pc, lr
+ENDPROC(v7_flush_dcache_all)
 
 /*
  *	v7_flush_cache_all()
@@ -85,6 +86,7 @@ ENTRY(v7_flush_kern_cache_all)
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
 	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
 	mov	pc, lr
+ENDPROC(v7_flush_kern_cache_all)
 
 /*
  *	v7_flush_cache_all()
@@ -110,6 +112,8 @@ ENTRY(v7_flush_user_cache_all)
  */
 ENTRY(v7_flush_user_cache_range)
 	mov	pc, lr
+ENDPROC(v7_flush_user_cache_all)
+ENDPROC(v7_flush_user_cache_range)
 
 /*
  *	v7_coherent_kern_range(start,end)
@@ -155,6 +159,8 @@ ENTRY(v7_coherent_user_range)
 	dsb
 	isb
 	mov	pc, lr
+ENDPROC(v7_coherent_kern_range)
+ENDPROC(v7_coherent_user_range)
 
 /*
  *	v7_flush_kern_dcache_page(kaddr)
@@ -174,6 +180,7 @@ ENTRY(v7_flush_kern_dcache_page)
 	blo	1b
 	dsb
 	mov	pc, lr
+ENDPROC(v7_flush_kern_dcache_page)
 
 /*
  *	v7_dma_inv_range(start,end)
@@ -202,6 +209,7 @@ ENTRY(v7_dma_inv_range)
 	blo	1b
 	dsb
 	mov	pc, lr
+ENDPROC(v7_dma_inv_range)
 
 /*
  *	v7_dma_clean_range(start,end)
@@ -219,6 +227,7 @@ ENTRY(v7_dma_clean_range)
 	blo	1b
 	dsb
 	mov	pc, lr
+ENDPROC(v7_dma_clean_range)
 
 /*
  *	v7_dma_flush_range(start,end)
@@ -236,6 +245,7 @@ ENTRY(v7_dma_flush_range)
 	blo	1b
 	dsb
 	mov	pc, lr
+ENDPROC(v7_dma_flush_range)
 
 	__INITDATA
 
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 158bd96763d3..10b1bae1a258 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -18,10 +18,11 @@
  */
 #include <linux/init.h>
 #include <linux/spinlock.h>
+#include <linux/io.h>
 
 #include <asm/system.h>
+#include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 
 #define CR_L2	(1 << 26)
 
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index ded0e96d069d..8d33e2549344 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -28,7 +28,7 @@
  * specific hacks for copying pages efficiently.
  */
 #define minicache_pgprot	__pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
-				  L_PTE_CACHEABLE)
+				  L_PTE_MT_MINICACHE)
 
 static DEFINE_SPINLOCK(minicache_lock);
 
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3adb79257f43..0e21c0767580 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -16,6 +16,7 @@
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 
 #include "mm.h"
 
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 2e455f82a4d5..bad49331bbf9 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -30,7 +30,7 @@
 #define COPYPAGE_MINICACHE	0xffff8000
 
 #define minicache_pgprot	__pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
-				  L_PTE_CACHEABLE)
+				  L_PTE_MT_MINICACHE)
 
 static DEFINE_SPINLOCK(minicache_lock);
 
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/dma-mapping.c
index db7b3e38ef1d..67960017dc8f 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1,5 +1,5 @@
 /*
- *  linux/arch/arm/mm/consistent.c
+ *  linux/arch/arm/mm/dma-mapping.c
  *
  *  Copyright (C) 2000-2004 Russell King
  *
@@ -512,3 +512,105 @@ void dma_cache_maint(const void *start, size_t size, int direction)
 	}
 }
 EXPORT_SYMBOL(dma_cache_maint);
+
+/**
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the dma_map_single interface.
+ * Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length.  They are obtained via
+ * sg_dma_{address,length}.
+ *
+ * Device ownership issues as mentioned for dma_map_single are the same
+ * here.
+ */
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i, j;
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+						s->length, dir);
+		if (dma_mapping_error(dev, s->dma_address))
+			goto bad_mapping;
+	}
+	return nents;
+
+ bad_mapping:
+	for_each_sg(sg, s, i, j)
+		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+	return 0;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+/**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+EXPORT_SYMBOL(dma_unmap_sg);
+
+/**
+ * dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+					sg_dma_len(s), dir);
+	}
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+/**
+ * dma_sync_sg_for_device
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
+					sg_dma_len(s), dir))
+			continue;
+
+		if (!arch_is_coherent())
+			dma_cache_maint(sg_virt(s), s->length, dir);
+	}
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
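For context, a hedged sketch of how a driver consumes these scatter-gather helpers (device setup elided; the function name is hypothetical):

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* Illustrative only: map a scatterlist for a device-to-memory
     * transfer, program the device, then unmap. */
    static int example_dma_transfer(struct device *dev,
    				struct scatterlist *sg, int nents)
    {
    	int mapped = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
    	if (mapped == 0)
    		return -ENOMEM;	/* one entry failed; all were unwound */

    	/* ... hand sg_dma_address(s)/sg_dma_len(s) of each entry to
    	 * the hardware and wait for completion ... */

    	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
    	return 0;
    }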
diff --git a/arch/arm/mm/extable.c b/arch/arm/mm/extable.c
index 9592c3ee4cb2..9d285626bc7d 100644
--- a/arch/arm/mm/extable.c
+++ b/arch/arm/mm/extable.c
@@ -2,7 +2,7 @@
  *  linux/arch/arm/mm/extable.c
  */
 #include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 int fixup_exception(struct pt_regs *regs)
 {
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index a8ec97b4752e..81d0b8772de3 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -17,11 +17,13 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 
+#include <asm/bugs.h>
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
  * We take the easy way out of this problem - we make the
@@ -63,9 +65,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * If this page isn't present, or is already setup to
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
-	if (ret && pte_val(entry) & shared_pte_mask) {
+	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
 		flush_cache_page(vma, address, pte_pfn(entry));
-		pte_val(entry) &= ~shared_pte_mask;
+		pte_val(entry) &= ~L_PTE_MT_MASK;
+		pte_val(entry) |= shared_pte_mask;
 		set_pte_at(vma->vm_mm, address, pte, entry);
 		flush_tlb_page(vma, address);
 	}
@@ -197,7 +200,7 @@ void __init check_writebuffer_bugs(void)
 	unsigned long *p1, *p2;
 	pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
 				 L_PTE_DIRTY|L_PTE_WRITE|
-				 L_PTE_BUFFERABLE);
+				 L_PTE_MT_BUFFERABLE);
 
 	p1 = vmap(&page, 1, VM_IOREMAP, prot);
 	p2 = vmap(&page, 1, VM_IOREMAP, prot);
@@ -218,7 +221,7 @@ void __init check_writebuffer_bugs(void)
 
 	if (v) {
 		printk("failed, %s\n", reason);
-		shared_pte_mask |= L_PTE_BUFFERABLE;
+		shared_pte_mask = L_PTE_MT_UNCACHED;
 	} else {
 		printk("ok\n");
 	}
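The wider theme of this series: L_PTE_CACHEABLE/L_PTE_BUFFERABLE stop being two independently OR-able PTE bits and become one multi-bit memory-type field, so shared_pte_mask now holds a complete field value and updates must clear the whole field first. Illustration of the field-style update (mask position and values are illustrative, not the real pgtable.h encoding):

    /* Replace the memory-type field of a Linux PTE value. */
    #define MT_MASK		(0xf << 2)	/* illustrative width/position */
    #define MT_BUFFERABLE	(0x1 << 2)

    static unsigned long pte_set_memtype(unsigned long pte, unsigned long mt)
    {
    	pte &= ~MT_MASK;	/* single-bit |=/&=~ updates no longer work */
    	pte |= mt;
    	return pte;
    }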
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 28ad7ab1c0cd..2df8d9facf57 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -13,11 +13,11 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
-#include <asm/uaccess.h>
 
 #include "fault.h"
 
@@ -72,9 +72,8 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 	}
 
 	pmd = pmd_offset(pgd, addr);
-#if PTRS_PER_PMD != 1
+	if (PTRS_PER_PMD != 1)
 		printk(", *pmd=%08lx", pmd_val(*pmd));
-#endif
 
 	if (pmd_none(*pmd))
 		break;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 029ee65fda2b..0fa9bf388f0b 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -12,6 +12,7 @@
 #include <linux/pagemap.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 30a69d67d673..82c4b4217989 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -26,9 +26,42 @@
 
 #include "mm.h"
 
-extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
-extern unsigned long phys_initrd_start;
-extern unsigned long phys_initrd_size;
+static unsigned long phys_initrd_start __initdata = 0;
+static unsigned long phys_initrd_size __initdata = 0;
+
+static void __init early_initrd(char **p)
+{
+	unsigned long start, size;
+
+	start = memparse(*p, p);
+	if (**p == ',') {
+		size = memparse((*p) + 1, p);
+
+		phys_initrd_start = start;
+		phys_initrd_size = size;
+	}
+}
+__early_param("initrd=", early_initrd);
+
+static int __init parse_tag_initrd(const struct tag *tag)
+{
+	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
+		"please update your bootloader.\n");
+	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD, parse_tag_initrd);
+
+static int __init parse_tag_initrd2(const struct tag *tag)
+{
+	phys_initrd_start = tag->u.initrd.start;
+	phys_initrd_size = tag->u.initrd.size;
+	return 0;
+}
+
+__tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
 /*
  * This is used to pass memory configuration data from paging_init
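memparse() accepts a number with an optional K/M/G suffix and advances the caller's cursor past what it consumed, which is what makes the comma check above work. A userspace re-creation of the `initrd=<start>,<size>` parse, with a simplified memparse stand-in:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for the kernel's memparse(); the real one
     * also accepts lowercase suffixes. */
    static unsigned long memparse(const char *p, char **retp)
    {
    	unsigned long v = strtoul(p, retp, 0);

    	switch (**retp) {
    	case 'G': v <<= 10;	/* fall through */
    	case 'M': v <<= 10;	/* fall through */
    	case 'K': v <<= 10; (*retp)++;
    	}
    	return v;
    }

    int main(void)
    {
    	char *p = "0x00800000,8M";
    	unsigned long start = memparse(p, &p);
    	unsigned long size = (*p == ',') ? memparse(p + 1, &p) : 0;

    	printf("start=%#lx size=%#lx\n", start, size);	/* 0x800000, 8MB */
    	return 0;
    }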
@@ -36,10 +69,6 @@ extern unsigned long phys_initrd_size;
  */
 static struct meminfo meminfo = { 0, };
 
-#define for_each_nodebank(iter,mi,no) \
-	for (iter = 0; iter < mi->nr_banks; iter++) \
-		if (mi->bank[iter].node == no)
-
 void show_mem(void)
 {
 	int free = 0, total = 0, reserved = 0;
@@ -50,14 +79,15 @@ void show_mem(void)
 	show_free_areas();
 	for_each_online_node(node) {
 		pg_data_t *n = NODE_DATA(node);
-		struct page *map = n->node_mem_map - n->node_start_pfn;
+		struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
 
 		for_each_nodebank (i,mi,node) {
+			struct membank *bank = &mi->bank[i];
 			unsigned int pfn1, pfn2;
 			struct page *page, *end;
 
-			pfn1 = __phys_to_pfn(mi->bank[i].start);
-			pfn2 = __phys_to_pfn(mi->bank[i].size + mi->bank[i].start);
+			pfn1 = bank_pfn_start(bank);
+			pfn2 = bank_pfn_end(bank);
 
 			page = map + pfn1;
 			end  = map + pfn2;
@@ -96,17 +126,17 @@ void show_mem(void)
 static unsigned int __init
 find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
 {
-	unsigned int start_pfn, bank, bootmap_pfn;
+	unsigned int start_pfn, i, bootmap_pfn;
 
 	start_pfn   = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT;
 	bootmap_pfn = 0;
 
-	for_each_nodebank(bank, mi, node) {
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
 		unsigned int start, end;
 
-		start = mi->bank[bank].start >> PAGE_SHIFT;
-		end = (mi->bank[bank].size +
-		       mi->bank[bank].start) >> PAGE_SHIFT;
+		start = bank_pfn_start(bank);
+		end = bank_pfn_end(bank);
 
 		if (end < start_pfn)
 			continue;
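bank_pfn_start() and friends are introduced elsewhere in this series (in arch/arm/include/asm/setup.h); presumably they are thin wrappers of roughly this shape, shown here as a hedged reconstruction rather than the authoritative definitions:

    #define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
    #define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
    #define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT)
    #define bank_phys_start(bank)	((bank)->start)
    #define bank_phys_end(bank)	((bank)->start + (bank)->size)
    #define bank_phys_size(bank)	((bank)->size)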
@@ -145,13 +175,10 @@ static int __init check_initrd(struct meminfo *mi)
 		initrd_node = -1;
 
 		for (i = 0; i < mi->nr_banks; i++) {
-			unsigned long bank_end;
-
-			bank_end = mi->bank[i].start + mi->bank[i].size;
-
-			if (mi->bank[i].start <= phys_initrd_start &&
-			    end <= bank_end)
-				initrd_node = mi->bank[i].node;
+			struct membank *bank = &mi->bank[i];
+			if (bank_phys_start(bank) <= phys_initrd_start &&
+			    end <= bank_phys_end(bank))
+				initrd_node = bank->node;
 		}
 	}
 
@@ -171,19 +198,17 @@ static inline void map_memory_bank(struct membank *bank)
 #ifdef CONFIG_MMU
 	struct map_desc map;
 
-	map.pfn = __phys_to_pfn(bank->start);
-	map.virtual = __phys_to_virt(bank->start);
-	map.length = bank->size;
+	map.pfn = bank_pfn_start(bank);
+	map.virtual = __phys_to_virt(bank_phys_start(bank));
+	map.length = bank_phys_size(bank);
 	map.type = MT_MEMORY;
 
 	create_mapping(&map);
 #endif
 }
 
-static unsigned long __init
-bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
+static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
 {
-	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
 	unsigned long start_pfn, end_pfn, boot_pfn;
 	unsigned int boot_pages;
 	pg_data_t *pgdat;
@@ -199,8 +224,8 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 		struct membank *bank = &mi->bank[i];
 		unsigned long start, end;
 
-		start = bank->start >> PAGE_SHIFT;
-		end = (bank->start + bank->size) >> PAGE_SHIFT;
+		start = bank_pfn_start(bank);
+		end = bank_pfn_end(bank);
 
 		if (start_pfn > start)
 			start_pfn = start;
@@ -230,8 +255,11 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	pgdat = NODE_DATA(node);
 	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
 
-	for_each_nodebank(i, mi, node)
-		free_bootmem_node(pgdat, mi->bank[i].start, mi->bank[i].size);
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
+		free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
+	}
 
 	/*
 	 * Reserve the bootmem bitmap for this node.
@@ -239,31 +267,39 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
 			     boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
 
-	/*
-	 * Reserve any special node zero regions.
-	 */
-	if (node == 0)
-		reserve_node_zero(pgdat);
-
+	return end_pfn;
+}
+
+static void __init bootmem_reserve_initrd(int node)
+{
 #ifdef CONFIG_BLK_DEV_INITRD
-	/*
-	 * If the initrd is in this node, reserve its memory.
-	 */
-	if (node == initrd_node) {
-		int res = reserve_bootmem_node(pgdat, phys_initrd_start,
-				phys_initrd_size, BOOTMEM_EXCLUSIVE);
-
-		if (res == 0) {
-			initrd_start = __phys_to_virt(phys_initrd_start);
-			initrd_end = initrd_start + phys_initrd_size;
-		} else {
-			printk(KERN_ERR
-				"INITRD: 0x%08lx+0x%08lx overlaps in-use "
-				"memory region - disabling initrd\n",
-				phys_initrd_start, phys_initrd_size);
-		}
+	pg_data_t *pgdat = NODE_DATA(node);
+	int res;
+
+	res = reserve_bootmem_node(pgdat, phys_initrd_start,
+			phys_initrd_size, BOOTMEM_EXCLUSIVE);
+
+	if (res == 0) {
+		initrd_start = __phys_to_virt(phys_initrd_start);
+		initrd_end = initrd_start + phys_initrd_size;
+	} else {
+		printk(KERN_ERR
+			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
+			"memory region - disabling initrd\n",
+			phys_initrd_start, phys_initrd_size);
 	}
 #endif
+}
+
+static void __init bootmem_free_node(int node, struct meminfo *mi)
+{
+	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
+	unsigned long start_pfn, end_pfn;
+	pg_data_t *pgdat = NODE_DATA(node);
+	int i;
+
+	start_pfn = pgdat->bdata->node_min_pfn;
+	end_pfn = pgdat->bdata->node_low_pfn;
 
 	/*
 	 * initialise the zones within this node.
@@ -284,7 +320,7 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	 */
 	zhole_size[0] = zone_size[0];
 	for_each_nodebank(i, mi, node)
-		zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
+		zhole_size[0] -= bank_pfn_size(&mi->bank[i]);
 
 	/*
 	 * Adjust the sizes according to any special requirements for
@@ -293,21 +329,12 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
 	arch_adjust_zones(node, zone_size, zhole_size);
 
 	free_area_init_node(node, zone_size, start_pfn, zhole_size);
-
-	return end_pfn;
 }
 
 void __init bootmem_init(struct meminfo *mi)
 {
 	unsigned long memend_pfn = 0;
-	int node, initrd_node, i;
-
-	/*
-	 * Invalidate the node number for empty or invalid memory banks
-	 */
-	for (i = 0; i < mi->nr_banks; i++)
-		if (mi->bank[i].size == 0 || mi->bank[i].node >= MAX_NUMNODES)
-			mi->bank[i].node = -1;
+	int node, initrd_node;
 
 	memcpy(&meminfo, mi, sizeof(meminfo));
 
@@ -320,9 +347,19 @@ void __init bootmem_init(struct meminfo *mi)
 	 * Run through each node initialising the bootmem allocator.
 	 */
 	for_each_node(node) {
-		unsigned long end_pfn;
+		unsigned long end_pfn = bootmem_init_node(node, mi);
 
-		end_pfn = bootmem_init_node(node, initrd_node, mi);
+		/*
+		 * Reserve any special node zero regions.
+		 */
+		if (node == 0)
+			reserve_node_zero(NODE_DATA(node));
+
+		/*
+		 * If the initrd is in this node, reserve its memory.
+		 */
+		if (node == initrd_node)
+			bootmem_reserve_initrd(node);
 
 		/*
 		 * Remember the highest memory PFN.
@@ -331,6 +368,19 @@ void __init bootmem_init(struct meminfo *mi)
 			memend_pfn = end_pfn;
 	}
 
+	/*
+	 * sparse_init() needs the bootmem allocator up and running.
+	 */
+	sparse_init();
+
+	/*
+	 * Now free memory in each node - free_area_init_node needs
+	 * the sparse mem_map arrays initialized by sparse_init()
+	 * for memmap_init_zone(), otherwise all PFNs are invalid.
+	 */
+	for_each_node(node)
+		bootmem_free_node(node, mi);
+
 	high_memory = __va(memend_pfn << PAGE_SHIFT);
 
 	/*
@@ -401,7 +451,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 	 * information on the command line.
 	 */
 	for_each_nodebank(i, mi, node) {
-		bank_start = mi->bank[i].start >> PAGE_SHIFT;
+		struct membank *bank = &mi->bank[i];
+
+		bank_start = bank_pfn_start(bank);
 		if (bank_start < prev_bank_end) {
 			printk(KERN_ERR "MEM: unordered memory banks.  "
 				"Not freeing memmap.\n");
@@ -415,8 +467,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 		if (prev_bank_end && prev_bank_end != bank_start)
 			free_memmap(node, prev_bank_end, bank_start);
 
-		prev_bank_end = (mi->bank[i].start +
-				 mi->bank[i].size) >> PAGE_SHIFT;
+		prev_bank_end = bank_pfn_end(bank);
 	}
 }
 
@@ -461,8 +512,8 @@ void __init mem_init(void)
 
 	num_physpages = 0;
 	for (i = 0; i < meminfo.nr_banks; i++) {
-		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
-		printk(" %ldMB", meminfo.bank[i].size >> 20);
+		num_physpages += bank_pfn_size(&meminfo.bank[i]);
+		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
 	}
 
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c
index 7429f8c01015..ffad039cbb73 100644
--- a/arch/arm/mm/iomap.c
+++ b/arch/arm/mm/iomap.c
@@ -7,8 +7,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 #ifdef __io
 void __iomem *ioport_map(unsigned long port, unsigned int nr)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index b81dbf9ffb77..18373f73f2fc 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -24,9 +24,10 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/io.h>
 
+#include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -55,8 +56,7 @@ static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
 		if (!pte_none(*pte))
 			goto bad;
 
-		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
-			    type->prot_pte_ext);
+		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
 		phys_addr += PAGE_SIZE;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	return 0;
@@ -332,15 +332,14 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 }
 EXPORT_SYMBOL(__arm_ioremap);
 
-void __iounmap(volatile void __iomem *addr)
+void __iounmap(volatile void __iomem *io_addr)
 {
+	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
 	struct vm_struct **p, *tmp;
 #endif
 	unsigned int section_mapping = 0;
 
-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);
-
 #ifndef CONFIG_SMP
 	/*
 	 * If this is a section based mapping we need to handle it
@@ -351,7 +350,7 @@ void __iounmap(volatile void __iomem *addr)
 	 */
 	write_lock(&vmlist_lock);
 	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
 			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
 				*p = tmp->next;
 				unmap_area_sections((unsigned long)tmp->addr,
@@ -366,6 +365,6 @@ void __iounmap(volatile void __iomem *addr)
 #endif
 
 	if (!section_mapping)
-		vunmap((void __force *)addr);
+		vunmap(addr);
 }
 EXPORT_SYMBOL(__iounmap);
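ioremap() cookies can carry an in-page offset when the mapped physical address was not page aligned; computing a separate page-aligned addr up front both gives vunmap() the address it requires and avoids reassigning through the volatile __iomem parameter. The alignment step in isolation:

    #include <stdio.h>

    #define PAGE_MASK	(~0xfffUL)	/* 4K pages, for illustration */

    int main(void)
    {
    	unsigned long cookie = 0xc8800000UL | 0x34;	/* base + offset */
    	void *base = (void *)(cookie & PAGE_MASK);

    	printf("unmap %p, not %#lx\n", base, cookie);
    	return 0;
    }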
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 7647c597fc59..5d9f53907b4e 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -18,7 +18,6 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
 
 struct mem_type {
 	unsigned int prot_pte;
-	unsigned int prot_pte_ext;
 	unsigned int prot_l1;
 	unsigned int prot_sect;
 	unsigned int domain;
@@ -35,3 +34,5 @@ struct pglist_data;
 void __init create_mapping(struct map_desc *md);
 void __init bootmem_init(struct meminfo *mi);
 void reserve_node_zero(struct pglist_data *pgdat);
+
+extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end;
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 3f6dc40b8353..5358fcc7f61e 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -6,6 +6,8 @@
 #include <linux/mman.h>
 #include <linux/shm.h>
 #include <linux/sched.h>
+#include <linux/io.h>
+#include <asm/cputype.h>
 #include <asm/system.h>
 
 #define COLOUR_ALIGN(addr,pgoff)		\
@@ -37,8 +39,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	 * caches alias.  This is indicated by bits 9 and 21 of the
 	 * cache type register.
 	 */
-	cache_type = read_cpuid(CPUID_CACHETYPE);
-	if (cache_type != read_cpuid(CPUID_ID)) {
+	cache_type = read_cpuid_cachetype();
+	if (cache_type != read_cpuid_id()) {
 		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
 		if (aliasing)
 			do_align = filp || flags & MAP_SHARED;
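The D-cache and I-cache descriptions sit 12 bits apart in the cache type register, so OR-ing the register with itself shifted right by 12 folds the two fields together and a single test of bit 11 covers the aliasing bit of both caches. Worked in isolation:

    #include <assert.h>

    int main(void)
    {
    	unsigned int ct = 1u << 23;		/* only the D-cache alias bit */
    	unsigned int merged = ct | (ct >> 12);	/* fold D field onto I field */

    	assert(merged & (1 << 11));		/* one test covers both caches */
    	return 0;
    }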
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 25d9a11eb617..8ba754064559 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 
+#include <asm/cputype.h>
 #include <asm/mach-types.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -27,9 +28,6 @@
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-extern void _stext, _etext, __data_start, _end;
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -68,27 +66,27 @@ static struct cachepolicy cache_policies[] __initdata = {
 		.policy		= "uncached",
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
-		.pte		= 0,
+		.pte		= L_PTE_MT_UNCACHED,
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
-		.pte		= PTE_BUFFERABLE,
+		.pte		= L_PTE_MT_BUFFERABLE,
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
-		.pte		= PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITETHROUGH,
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
-		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITEBACK,
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
-		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
+		.pte		= L_PTE_MT_WRITEALLOC,
 	}
 };
 
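The L_PTE_MT_* names replace the old free-standing PTE_CACHEABLE/PTE_BUFFERABLE flags with one enumerated memory-type field in the Linux PTE, which the per-CPU set_pte_ext code later translates into hardware C/B/TEX bits. A hedged sketch of the encoding style (values illustrative; the authoritative definitions are in this series' asm/pgtable.h):

    /* Illustrative: a small type index stored in one PTE field. */
    #define L_PTE_MT_SHIFT		2
    #define L_PTE_MT_MASK		(0xf << L_PTE_MT_SHIFT)
    #define L_PTE_MT_UNCACHED	(0x0 << L_PTE_MT_SHIFT)
    #define L_PTE_MT_BUFFERABLE	(0x1 << L_PTE_MT_SHIFT)
    #define L_PTE_MT_WRITETHROUGH	(0x2 << L_PTE_MT_SHIFT)
    #define L_PTE_MT_WRITEBACK	(0x3 << L_PTE_MT_SHIFT)
    #define L_PTE_MT_WRITEALLOC	(0x7 << L_PTE_MT_SHIFT)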
@@ -186,29 +184,28 @@ void adjust_cr(unsigned long mask, unsigned long set)
 
 static struct mem_type mem_types[] = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
-		.prot_pte	= PROT_PTE_DEVICE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
+				  L_PTE_SHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
-		.prot_pte	= PROT_PTE_DEVICE,
-		.prot_pte_ext	= PTE_EXT_TEX(2),
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_TEX(2),
 		.domain		= DOMAIN_IO,
 	},
 	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
-		.prot_pte	= PROT_PTE_DEVICE | L_PTE_CACHEABLE | L_PTE_BUFFERABLE,
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 		.prot_l1	= PMD_TYPE_TABLE,
 		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 		.domain		= DOMAIN_IO,
 	},
-	[MT_DEVICE_IXP2000] = {	  /* IXP2400 requires XCB=101 for on-chip I/O */
-		.prot_pte	= PROT_PTE_DEVICE,
+	[MT_DEVICE_WC] = {	/* ioremap_wc */
+		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE |
-				  PMD_SECT_TEX(1),
+		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
@@ -253,7 +250,7 @@ static void __init build_mem_type_table(void)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
-	unsigned int user_pgprot, kern_pgprot;
+	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -271,6 +268,20 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
+#ifdef CONFIG_SMP
+	cachepolicy = CPOLICY_WRITEALLOC;
+#endif
+
+	/*
+	 * On non-Xscale3 ARMv5-and-older systems, use CB=01
+	 * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
+	 * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
+	 * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+	 */
+	if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
+		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+		mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
+	}
 
 	/*
 	 * ARMv5 and lower, bit 4 must be set for page tables.
@@ -292,7 +303,15 @@ static void __init build_mem_type_table(void)
 	}
 
 	cp = &cache_policies[cachepolicy];
-	kern_pgprot = user_pgprot = cp->pte;
+	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+
+#ifndef CONFIG_SMP
+	/*
+	 * Only use write-through for non-SMP systems
+	 */
+	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
+#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
@@ -320,7 +339,6 @@ static void __init build_mem_type_table(void)
 		/*
 		 * Mark the device area as "shared device"
 		 */
-		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
 		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 
 #ifdef CONFIG_SMP
@@ -329,30 +347,21 @@ static void __init build_mem_type_table(void)
 		 */
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
+		vecs_pgprot |= L_PTE_SHARED;
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 #endif
 	}
 
 	for (i = 0; i < 16; i++) {
 		unsigned long v = pgprot_val(protection_map[i]);
-		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
-		protection_map[i] = __pgprot(v);
+		protection_map[i] = __pgprot(v | user_pgprot);
 	}
 
-	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
-	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
+	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
+	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 
-	if (cpu_arch >= CPU_ARCH_ARMv5) {
-#ifndef CONFIG_SMP
-		/*
-		 * Only use write-through for non-SMP systems
-		 */
-		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
-#endif
-	} else {
+	if (cpu_arch < CPU_ARCH_ARMv5)
 		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-	}
 
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
@@ -400,8 +409,7 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
-			    type->prot_pte_ext);
+		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -568,12 +576,35 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(io_desc + i);
 }
 
+static unsigned long __initdata vmalloc_reserve = SZ_128M;
+
+/*
+ * vmalloc=size forces the vmalloc area to be exactly 'size'
+ * bytes. This can be used to increase (or decrease) the vmalloc
+ * area - the default is 128m.
+ */
+static void __init early_vmalloc(char **arg)
+{
+	vmalloc_reserve = memparse(*arg, arg);
+
+	if (vmalloc_reserve < SZ_16M) {
+		vmalloc_reserve = SZ_16M;
+		printk(KERN_WARNING
+			"vmalloc area too small, limiting to %luMB\n",
+			vmalloc_reserve >> 20);
+	}
+}
+__early_param("vmalloc=", early_vmalloc);
+
+#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
+
 static int __init check_membank_valid(struct membank *mb)
 {
 	/*
-	 * Check whether this memory region has non-zero size.
+	 * Check whether this memory region has non-zero size or
+	 * invalid node number.
 	 */
-	if (mb->size == 0)
+	if (mb->size == 0 || mb->node >= MAX_NUMNODES)
 		return 0;
 
 	/*
@@ -607,8 +638,7 @@ static int __init check_membank_valid(struct membank *mb)
 
 static void __init sanity_check_meminfo(struct meminfo *mi)
 {
-	int i;
-	int j;
+	int i, j;
 
 	for (i = 0, j = 0; i < mi->nr_banks; i++) {
 		if (check_membank_valid(&mi->bank[i]))
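early_vmalloc() above makes `vmalloc=` accept the usual memparse suffixes and clamps the result to a 16MB floor. The clamp, demonstrated standalone:

    #include <stdio.h>

    #define SZ_16M	(16UL << 20)

    int main(void)
    {
    	unsigned long reserve = 8UL << 20;	/* e.g. vmalloc=8M */

    	if (reserve < SZ_16M) {			/* as in early_vmalloc() */
    		reserve = SZ_16M;
    		printf("vmalloc area too small, limiting to %luMB\n",
    		       reserve >> 20);
    	}
    	printf("vmalloc reserve: %luMB\n", reserve >> 20);
    	return 0;
    }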
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 63c62fdea521..07b62b238979 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -7,16 +7,14 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/io.h>
 
 #include <asm/cacheflush.h>
-#include <asm/io.h>
 #include <asm/page.h>
 #include <asm/mach/arch.h>
 
 #include "mm.h"
 
-extern void _stext, __data_start, _end;
-
 /*
  * Reserve the various regions of node 0
  */
@@ -43,12 +41,26 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 			BOOTMEM_DEFAULT);
 }
 
+static void __init sanity_check_meminfo(struct meminfo *mi)
+{
+	int i, j;
+
+	for (i = 0, j = 0; i < mi->nr_banks; i++) {
+		struct membank *mb = &mi->bank[i];
+
+		if (mb->size != 0 && mb->node < MAX_NUMNODES)
+			mi->bank[j++] = mi->bank[i];
+	}
+	mi->nr_banks = j;
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
  */
 void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
+	sanity_check_meminfo(mi);
 	bootmem_init(mi);
 }
 
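The new sanity_check_meminfo() is the standard in-place filter idiom: one index scans, a second writes back only the banks that pass, and the count is trimmed afterwards. The same pattern on a plain array:

    #include <assert.h>

    /* Keep only nonzero elements, compacting in place; returns the
     * new length. */
    static int filter_nonzero(int *a, int n)
    {
    	int i, j = 0;

    	for (i = 0; i < n; i++)
    		if (a[i] != 0)
    			a[j++] = a[i];
    	return j;
    }

    int main(void)
    {
    	int a[] = { 3, 0, 5, 0, 7 };
    	int n = filter_nonzero(a, 5);

    	assert(n == 3 && a[0] == 3 && a[1] == 5 && a[2] == 7);
    	return 0;
    }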
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 5673f4d6113b..b5551bf010aa 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -29,7 +29,7 @@
 #include <linux/init.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 #include <asm/ptrace.h>
@@ -399,29 +399,7 @@ ENTRY(cpu_arm1020_switch_mm)
 	.align	5
 ENTRY(cpu_arm1020_set_pte_ext)
 #ifdef CONFIG_MMU
-	str	r1, [r0], #-2048		@ linux version
-
-	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
-	bic	r2, r1, #PTE_SMALL_AP_MASK
-	bic	r2, r2, #PTE_TYPE_MASK
-	orr	r2, r2, #PTE_TYPE_SMALL
-
-	tst	r1, #L_PTE_USER			@ User?
-	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW
-
-	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
-	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW
-
-	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
-	movne	r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	eor	r3, r1, #0x0a			@ C & small page?
-	tst	r3, #0x0b
-	biceq	r2, r2, #4
-#endif
-	str	r2, [r0]			@ hardware version
+	armv3_set_pte_ext
 	mov	r0, r0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r0, c7, c10, 4
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 4343fdb0e9e5..8bc6740c29eb 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -29,7 +29,7 @@
 #include <linux/init.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 #include <asm/ptrace.h>
@@ -383,29 +383,7 @@ ENTRY(cpu_arm1020e_switch_mm)
 	.align	5
 ENTRY(cpu_arm1020e_set_pte_ext)
 #ifdef CONFIG_MMU
-	str	r1, [r0], #-2048		@ linux version
-
-	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
-	bic	r2, r1, #PTE_SMALL_AP_MASK
-	bic	r2, r2, #PTE_TYPE_MASK
-	orr	r2, r2, #PTE_TYPE_SMALL
-
-	tst	r1, #L_PTE_USER			@ User?
-	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW
-
-	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
-	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW
-
-	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
-	movne	r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	eor	r3, r1, #0x0a			@ C & small page?
-	tst	r3, #0x0b
-	biceq	r2, r2, #4
-#endif
-	str	r2, [r0]			@ hardware version
+	armv3_set_pte_ext
 	mov	r0, r0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 2a4ea1659e96..2cd03e66c0a3 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
-#include <asm/elf.h>
+#include <asm/hwcap.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/pgtable.h>
 #include <asm/ptrace.h>
@@ -365,29 +365,7 @@ ENTRY(cpu_arm1022_switch_mm)
 	.align	5
 ENTRY(cpu_arm1022_set_pte_ext)
 #ifdef CONFIG_MMU
-	str	r1, [r0], #-2048		@ linux version
-
-	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
-	bic	r2, r1, #PTE_SMALL_AP_MASK
-	bic	r2, r2, #PTE_TYPE_MASK
-	orr	r2, r2, #PTE_TYPE_SMALL
-
-	tst	r1, #L_PTE_USER			@ User?
-	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW
-
-	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
-	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW
-
-	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
-	movne	r2, #0
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-	eor	r3, r1, #0x0a			@ C & small page?
-	tst	r3, #0x0b
-	biceq	r2, r2, #4
-#endif
-	str	r2, [r0]			@ hardware version
+	armv3_set_pte_ext
 	mov	r0, r0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 77a1babd421c..ad961a897f6e 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -18,7 +18,7 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <asm/assembler.h> 19#include <asm/assembler.h>
20#include <asm/asm-offsets.h> 20#include <asm/asm-offsets.h>
21#include <asm/elf.h> 21#include <asm/hwcap.h>
22#include <asm/pgtable-hwdef.h> 22#include <asm/pgtable-hwdef.h>
23#include <asm/pgtable.h> 23#include <asm/pgtable.h>
24#include <asm/ptrace.h> 24#include <asm/ptrace.h>
@@ -354,29 +354,7 @@ ENTRY(cpu_arm1026_switch_mm)
354 .align 5 354 .align 5
355ENTRY(cpu_arm1026_set_pte_ext) 355ENTRY(cpu_arm1026_set_pte_ext)
356#ifdef CONFIG_MMU 356#ifdef CONFIG_MMU
357 str r1, [r0], #-2048 @ linux version 357 armv3_set_pte_ext
358
359 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
360
361 bic r2, r1, #PTE_SMALL_AP_MASK
362 bic r2, r2, #PTE_TYPE_MASK
363 orr r2, r2, #PTE_TYPE_SMALL
364
365 tst r1, #L_PTE_USER @ User?
366 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
367
368 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
369 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
370
371 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
372 movne r2, #0
373
374#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
375 eor r3, r1, #0x0a @ C & small page?
376 tst r3, #0x0b
377 biceq r2, r2, #4
378#endif
379 str r2, [r0] @ hardware version
380 mov r0, r0 358 mov r0, r0
381#ifndef CONFIG_CPU_DCACHE_DISABLE 359#ifndef CONFIG_CPU_DCACHE_DISABLE
382 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 360 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index c371fc87776e..80d6e1de069a 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -15,11 +15,13 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <asm/assembler.h> 16#include <asm/assembler.h>
17#include <asm/asm-offsets.h> 17#include <asm/asm-offsets.h>
18#include <asm/elf.h> 18#include <asm/hwcap.h>
19#include <asm/pgtable-hwdef.h> 19#include <asm/pgtable-hwdef.h>
20#include <asm/pgtable.h> 20#include <asm/pgtable.h>
21#include <asm/ptrace.h> 21#include <asm/ptrace.h>
22 22
23#include "proc-macros.S"
24
23ENTRY(cpu_arm6_dcache_clean_area) 25ENTRY(cpu_arm6_dcache_clean_area)
24ENTRY(cpu_arm7_dcache_clean_area) 26ENTRY(cpu_arm7_dcache_clean_area)
25 mov pc, lr 27 mov pc, lr
@@ -214,30 +216,13 @@ ENTRY(cpu_arm7_switch_mm)
214 * : r1 = value to set 216 * : r1 = value to set
215 * Purpose : Set a PTE and flush it out of any WB cache 217 * Purpose : Set a PTE and flush it out of any WB cache
216 */ 218 */
217 .align 5 219 .align 5
218ENTRY(cpu_arm6_set_pte_ext) 220ENTRY(cpu_arm6_set_pte_ext)
219ENTRY(cpu_arm7_set_pte_ext) 221ENTRY(cpu_arm7_set_pte_ext)
220#ifdef CONFIG_MMU 222#ifdef CONFIG_MMU
221 str r1, [r0], #-2048 @ linux version 223 armv3_set_pte_ext wc_disable=0
222
223 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
224
225 bic r2, r1, #PTE_SMALL_AP_MASK
226 bic r2, r2, #PTE_TYPE_MASK
227 orr r2, r2, #PTE_TYPE_SMALL
228
229 tst r1, #L_PTE_USER @ User?
230 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
231
232 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
233 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
234
235 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young
236 movne r2, #0
237
238 str r2, [r0] @ hardware version
239#endif /* CONFIG_MMU */ 224#endif /* CONFIG_MMU */
240 mov pc, lr 225 mov pc, lr
241 226
242/* 227/*
243 * Function: _arm6_7_reset 228 * Function: _arm6_7_reset
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index eda733d30455..85ae18695f10 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -36,7 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <asm/assembler.h> 37#include <asm/assembler.h>
38#include <asm/asm-offsets.h> 38#include <asm/asm-offsets.h>
39#include <asm/elf.h> 39#include <asm/hwcap.h>
40#include <asm/pgtable-hwdef.h> 40#include <asm/pgtable-hwdef.h>
41#include <asm/pgtable.h> 41#include <asm/pgtable.h>
42#include <asm/ptrace.h> 42#include <asm/ptrace.h>
@@ -93,29 +93,12 @@ ENTRY(cpu_arm720_switch_mm)
93 * : r1 = value to set 93 * : r1 = value to set
94 * Purpose : Set a PTE and flush it out of any WB cache 94 * Purpose : Set a PTE and flush it out of any WB cache
95 */ 95 */
96 .align 5 96 .align 5
97ENTRY(cpu_arm720_set_pte_ext) 97ENTRY(cpu_arm720_set_pte_ext)
98#ifdef CONFIG_MMU 98#ifdef CONFIG_MMU
99 str r1, [r0], #-2048 @ linux version 99 armv3_set_pte_ext wc_disable=0
100
101 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
102
103 bic r2, r1, #PTE_SMALL_AP_MASK
104 bic r2, r2, #PTE_TYPE_MASK
105 orr r2, r2, #PTE_TYPE_SMALL
106
107 tst r1, #L_PTE_USER @ User?
108 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
109
110 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
111 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
112
113 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young
114 movne r2, #0
115
116 str r2, [r0] @ hardware version
117#endif 100#endif
118 mov pc, lr 101 mov pc, lr
119 102
120/* 103/*
121 * Function: arm720_reset 104 * Function: arm720_reset
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 3a57376c8bc9..4f95bee63e95 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -12,7 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/elf.h> 15#include <asm/hwcap.h>
16#include <asm/pgtable-hwdef.h> 16#include <asm/pgtable-hwdef.h>
17#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 7b3ecdeb5370..93e05fa7bed4 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -12,7 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/elf.h> 15#include <asm/hwcap.h>
16#include <asm/pgtable-hwdef.h> 16#include <asm/pgtable-hwdef.h>
17#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 28cdb060df45..914d688394fc 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -28,7 +28,7 @@
28#include <linux/linkage.h> 28#include <linux/linkage.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <asm/assembler.h> 30#include <asm/assembler.h>
31#include <asm/elf.h> 31#include <asm/hwcap.h>
32#include <asm/pgtable-hwdef.h> 32#include <asm/pgtable-hwdef.h>
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/page.h> 34#include <asm/page.h>
@@ -351,33 +351,11 @@ ENTRY(cpu_arm920_switch_mm)
351 .align 5 351 .align 5
352ENTRY(cpu_arm920_set_pte_ext) 352ENTRY(cpu_arm920_set_pte_ext)
353#ifdef CONFIG_MMU 353#ifdef CONFIG_MMU
354 str r1, [r0], #-2048 @ linux version 354 armv3_set_pte_ext
355
356 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
357
358 bic r2, r1, #PTE_SMALL_AP_MASK
359 bic r2, r2, #PTE_TYPE_MASK
360 orr r2, r2, #PTE_TYPE_SMALL
361
362 tst r1, #L_PTE_USER @ User?
363 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
364
365 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
366 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
367
368 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
369 movne r2, #0
370
371#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
372 eor r3, r2, #0x0a @ C & small page?
373 tst r3, #0x0b
374 biceq r2, r2, #4
375#endif
376 str r2, [r0] @ hardware version
377 mov r0, r0 355 mov r0, r0
378 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 356 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
379 mcr p15, 0, r0, c7, c10, 4 @ drain WB 357 mcr p15, 0, r0, c7, c10, 4 @ drain WB
380#endif /* CONFIG_MMU */ 358#endif
381 mov pc, lr 359 mov pc, lr
382 360
383 __INIT 361 __INIT
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 94ddcb4a4b76..51c9c9859e58 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -29,7 +29,7 @@
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <asm/assembler.h> 31#include <asm/assembler.h>
32#include <asm/elf.h> 32#include <asm/hwcap.h>
33#include <asm/pgtable-hwdef.h> 33#include <asm/pgtable-hwdef.h>
34#include <asm/pgtable.h> 34#include <asm/pgtable.h>
35#include <asm/page.h> 35#include <asm/page.h>
@@ -355,29 +355,7 @@ ENTRY(cpu_arm922_switch_mm)
355 .align 5 355 .align 5
356ENTRY(cpu_arm922_set_pte_ext) 356ENTRY(cpu_arm922_set_pte_ext)
357#ifdef CONFIG_MMU 357#ifdef CONFIG_MMU
358 str r1, [r0], #-2048 @ linux version 358 armv3_set_pte_ext
359
360 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
361
362 bic r2, r1, #PTE_SMALL_AP_MASK
363 bic r2, r2, #PTE_TYPE_MASK
364 orr r2, r2, #PTE_TYPE_SMALL
365
366 tst r1, #L_PTE_USER @ User?
367 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
368
369 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
370 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
371
372 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
373 movne r2, #0
374
375#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
376 eor r3, r2, #0x0a @ C & small page?
377 tst r3, #0x0b
378 biceq r2, r2, #4
379#endif
380 str r2, [r0] @ hardware version
381 mov r0, r0 359 mov r0, r0
382 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 360 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
383 mcr p15, 0, r0, c7, c10, 4 @ drain WB 361 mcr p15, 0, r0, c7, c10, 4 @ drain WB
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index d045812f3399..2724526d89c1 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -52,7 +52,7 @@
52#include <linux/linkage.h> 52#include <linux/linkage.h>
53#include <linux/init.h> 53#include <linux/init.h>
54#include <asm/assembler.h> 54#include <asm/assembler.h>
55#include <asm/elf.h> 55#include <asm/hwcap.h>
56#include <asm/pgtable-hwdef.h> 56#include <asm/pgtable-hwdef.h>
57#include <asm/pgtable.h> 57#include <asm/pgtable.h>
58#include <asm/page.h> 58#include <asm/page.h>
@@ -398,29 +398,7 @@ ENTRY(cpu_arm925_switch_mm)
398 .align 5 398 .align 5
399ENTRY(cpu_arm925_set_pte_ext) 399ENTRY(cpu_arm925_set_pte_ext)
400#ifdef CONFIG_MMU 400#ifdef CONFIG_MMU
401 str r1, [r0], #-2048 @ linux version 401 armv3_set_pte_ext
402
403 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
404
405 bic r2, r1, #PTE_SMALL_AP_MASK
406 bic r2, r2, #PTE_TYPE_MASK
407 orr r2, r2, #PTE_TYPE_SMALL
408
409 tst r1, #L_PTE_USER @ User?
410 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
411
412 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
413 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
414
415 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
416 movne r2, #0
417
418#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
419 eor r3, r2, #0x0a @ C & small page?
420 tst r3, #0x0b
421 biceq r2, r2, #4
422#endif
423 str r2, [r0] @ hardware version
424 mov r0, r0 402 mov r0, r0
425#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 403#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
426 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 404 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 4cd33169a7c9..54466937bff9 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -28,7 +28,7 @@
28#include <linux/linkage.h> 28#include <linux/linkage.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <asm/assembler.h> 30#include <asm/assembler.h>
31#include <asm/elf.h> 31#include <asm/hwcap.h>
32#include <asm/pgtable-hwdef.h> 32#include <asm/pgtable-hwdef.h>
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/page.h> 34#include <asm/page.h>
@@ -359,29 +359,7 @@ ENTRY(cpu_arm926_switch_mm)
359 .align 5 359 .align 5
360ENTRY(cpu_arm926_set_pte_ext) 360ENTRY(cpu_arm926_set_pte_ext)
361#ifdef CONFIG_MMU 361#ifdef CONFIG_MMU
362 str r1, [r0], #-2048 @ linux version 362 armv3_set_pte_ext
363
364 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
365
366 bic r2, r1, #PTE_SMALL_AP_MASK
367 bic r2, r2, #PTE_TYPE_MASK
368 orr r2, r2, #PTE_TYPE_SMALL
369
370 tst r1, #L_PTE_USER @ User?
371 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
372
373 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
374 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
375
376 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
377 movne r2, #0
378
379#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
380 eor r3, r2, #0x0a @ C & small page?
381 tst r3, #0x0b
382 biceq r2, r2, #4
383#endif
384 str r2, [r0] @ hardware version
385 mov r0, r0 363 mov r0, r0
386#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 364#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
387 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 365 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 551244d5ca19..f595117caf55 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -11,7 +11,7 @@
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/elf.h> 14#include <asm/hwcap.h>
15#include <asm/pgtable-hwdef.h> 15#include <asm/pgtable-hwdef.h>
16#include <asm/pgtable.h> 16#include <asm/pgtable.h>
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 6168c6160dee..e03f6ff1fb26 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -13,7 +13,7 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/assembler.h> 15#include <asm/assembler.h>
16#include <asm/elf.h> 16#include <asm/hwcap.h>
17#include <asm/pgtable-hwdef.h> 17#include <asm/pgtable-hwdef.h>
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19#include <asm/ptrace.h> 19#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index c85c1f50e396..be6c11d2b3fb 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -12,7 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/elf.h> 15#include <asm/hwcap.h>
16#include <asm/pgtable-hwdef.h> 16#include <asm/pgtable-hwdef.h>
17#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index f2e5884c513a..0fe1f8fc3488 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -22,7 +22,7 @@
22#include <linux/linkage.h> 22#include <linux/linkage.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <asm/assembler.h> 24#include <asm/assembler.h>
25#include <asm/elf.h> 25#include <asm/hwcap.h>
26#include <asm/pgtable-hwdef.h> 26#include <asm/pgtable-hwdef.h>
27#include <asm/pgtable.h> 27#include <asm/pgtable.h>
28#include <asm/page.h> 28#include <asm/page.h>
@@ -80,7 +80,8 @@ ENTRY(cpu_feroceon_proc_fin)
80 msr cpsr_c, ip 80 msr cpsr_c, ip
81 bl feroceon_flush_kern_cache_all 81 bl feroceon_flush_kern_cache_all
82 82
83#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH) 83#if defined(CONFIG_CACHE_FEROCEON_L2) && \
84 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
84 mov r0, #0 85 mov r0, #0
85 mcr p15, 1, r0, c15, c9, 0 @ clean L2 86 mcr p15, 1, r0, c15, c9, 0 @ clean L2
86 mcr p15, 0, r0, c7, c10, 4 @ drain WB 87 mcr p15, 0, r0, c7, c10, 4 @ drain WB
@@ -389,7 +390,8 @@ ENTRY(feroceon_range_cache_fns)
389 390
390 .align 5 391 .align 5
391ENTRY(cpu_feroceon_dcache_clean_area) 392ENTRY(cpu_feroceon_dcache_clean_area)
392#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH) 393#if defined(CONFIG_CACHE_FEROCEON_L2) && \
394 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
393 mov r2, r0 395 mov r2, r0
394 mov r3, r1 396 mov r3, r1
395#endif 397#endif
@@ -397,7 +399,8 @@ ENTRY(cpu_feroceon_dcache_clean_area)
397 add r0, r0, #CACHE_DLINESIZE 399 add r0, r0, #CACHE_DLINESIZE
398 subs r1, r1, #CACHE_DLINESIZE 400 subs r1, r1, #CACHE_DLINESIZE
399 bhi 1b 401 bhi 1b
400#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH) 402#if defined(CONFIG_CACHE_FEROCEON_L2) && \
403 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
4011: mcr p15, 1, r2, c15, c9, 1 @ clean L2 entry 4041: mcr p15, 1, r2, c15, c9, 1 @ clean L2 entry
402 add r2, r2, #CACHE_DLINESIZE 405 add r2, r2, #CACHE_DLINESIZE
403 subs r3, r3, #CACHE_DLINESIZE 406 subs r3, r3, #CACHE_DLINESIZE
@@ -446,27 +449,11 @@ ENTRY(cpu_feroceon_switch_mm)
446 .align 5 449 .align 5
447ENTRY(cpu_feroceon_set_pte_ext) 450ENTRY(cpu_feroceon_set_pte_ext)
448#ifdef CONFIG_MMU 451#ifdef CONFIG_MMU
449 str r1, [r0], #-2048 @ linux version 452 armv3_set_pte_ext wc_disable=0
450
451 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
452
453 bic r2, r1, #PTE_SMALL_AP_MASK
454 bic r2, r2, #PTE_TYPE_MASK
455 orr r2, r2, #PTE_TYPE_SMALL
456
457 tst r1, #L_PTE_USER @ User?
458 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
459
460 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
461 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
462
463 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
464 movne r2, #0
465
466 str r2, [r0] @ hardware version
467 mov r0, r0 453 mov r0, r0
468 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 454 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
469#if defined(CONFIG_CACHE_FEROCEON_L2) && !defined(CONFIG_L2_CACHE_WRITETHROUGH) 455#if defined(CONFIG_CACHE_FEROCEON_L2) && \
456 !defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
470 mcr p15, 1, r0, c15, c9, 1 @ clean L2 entry 457 mcr p15, 1, r0, c15, c9, 1 @ clean L2 entry
471#endif 458#endif
472 mcr p15, 0, r0, c7, c10, 4 @ drain WB 459 mcr p15, 0, r0, c7, c10, 4 @ drain WB
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index b13150052a76..54b1f721dec8 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -71,3 +71,173 @@
71 mov \reg, #16 @ size offset 71 mov \reg, #16 @ size offset
72 mov \reg, \reg, lsl \tmp @ actual cache line size 72 mov \reg, \reg, lsl \tmp @ actual cache line size
73 .endm 73 .endm
74
75
76/*
77 * Sanity check the PTE configuration for the code below - which makes
 78 * certain assumptions about how these bits are laid out.
79 */
80#if L_PTE_SHARED != PTE_EXT_SHARED
81#error PTE shared bit mismatch
82#endif
83#if L_PTE_BUFFERABLE != PTE_BUFFERABLE
84#error PTE bufferable bit mismatch
85#endif
86#if L_PTE_CACHEABLE != PTE_CACHEABLE
87#error PTE cacheable bit mismatch
88#endif
89#if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\
90 L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
91#error Invalid Linux PTE bit settings
92#endif
93
94/*
95 * The ARMv6 and ARMv7 set_pte_ext translation function.
96 *
97 * Permission translation:
98 * YUWD APX AP1 AP0 SVC User
99 * 0xxx 0 0 0 no acc no acc
100 * 100x 1 0 1 r/o no acc
101 * 10x0 1 0 1 r/o no acc
102 * 1011 0 0 1 r/w no acc
103 * 110x 0 1 0 r/w r/o
104 * 11x0 0 1 0 r/w r/o
105 * 1111 0 1 1 r/w r/w
106 */
107 .macro armv6_mt_table pfx
108\pfx\()_mt_table:
109 .long 0x00 @ L_PTE_MT_UNCACHED
110 .long PTE_EXT_TEX(1) @ L_PTE_MT_BUFFERABLE
111 .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
112 .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
113 .long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
114 .long 0x00 @ unused
115 .long 0x00 @ L_PTE_MT_MINICACHE (not present)
116 .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
117 .long 0x00 @ unused
118 .long PTE_EXT_TEX(1) @ L_PTE_MT_DEV_WC
119 .long 0x00 @ unused
120 .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED
121 .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
122 .long 0x00 @ unused
123 .long 0x00 @ unused
124 .long 0x00 @ unused
125 .endm
126
127 .macro armv6_set_pte_ext pfx
128 str r1, [r0], #-2048 @ linux version
129
130 bic r3, r1, #0x000003fc
131 bic r3, r3, #PTE_TYPE_MASK
132 orr r3, r3, r2
133 orr r3, r3, #PTE_EXT_AP0 | 2
134
135 adr ip, \pfx\()_mt_table
136 and r2, r1, #L_PTE_MT_MASK
137 ldr r2, [ip, r2]
138
139 tst r1, #L_PTE_WRITE
140 tstne r1, #L_PTE_DIRTY
141 orreq r3, r3, #PTE_EXT_APX
142
143 tst r1, #L_PTE_USER
144 orrne r3, r3, #PTE_EXT_AP1
145 tstne r3, #PTE_EXT_APX
146 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
147
148 tst r1, #L_PTE_EXEC
149 orreq r3, r3, #PTE_EXT_XN
150
151 orr r3, r3, r2
152
153 tst r1, #L_PTE_YOUNG
154 tstne r1, #L_PTE_PRESENT
155 moveq r3, #0
156
157 str r3, [r0]
158 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
159 .endm
160
161
162/*
163 * The ARMv3, ARMv4 and ARMv5 set_pte_ext translation function,
164 * covering most CPUs except Xscale and Xscale 3.
165 *
166 * Permission translation:
167 * YUWD AP SVC User
168 * 0xxx 0x00 no acc no acc
169 * 100x 0x00 r/o no acc
170 * 10x0 0x00 r/o no acc
171 * 1011 0x55 r/w no acc
172 * 110x 0xaa r/w r/o
173 * 11x0 0xaa r/w r/o
174 * 1111 0xff r/w r/w
175 */
176 .macro armv3_set_pte_ext wc_disable=1
177 str r1, [r0], #-2048 @ linux version
178
179 eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
180
181 bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits
182 bic r2, r2, #PTE_TYPE_MASK
183 orr r2, r2, #PTE_TYPE_SMALL
184
185 tst r3, #L_PTE_USER @ user?
186 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
187
188 tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty?
189 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
190
191 tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
192 movne r2, #0
193
194 .if \wc_disable
195#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
196 tst r2, #PTE_CACHEABLE
197 bicne r2, r2, #PTE_BUFFERABLE
198#endif
199 .endif
200 str r2, [r0] @ hardware version
201 .endm
202
203
204/*
205 * Xscale set_pte_ext translation, split into two halves to cope
206 * with work-arounds. r3 must be preserved by code between these
207 * two macros.
208 *
209 * Permission translation:
210 * YUWD AP SVC User
211 * 0xxx 00 no acc no acc
212 * 100x 00 r/o no acc
213 * 10x0 00 r/o no acc
214 * 1011 01 r/w no acc
215 * 110x 10 r/w r/o
216 * 11x0 10 r/w r/o
217 * 1111 11 r/w r/w
218 */
219 .macro xscale_set_pte_ext_prologue
220 str r1, [r0], #-2048 @ linux version
221
222 eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
223
224 bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits
225 orr r2, r2, #PTE_TYPE_EXT @ extended page
226
227 tst r3, #L_PTE_USER @ user?
228 orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
229
230 tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty?
231 orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
232 @ combined with user -> user r/w
233 .endm
234
235 .macro xscale_set_pte_ext_epilogue
236 tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
237 movne r2, #0 @ no -> fault
238
239 str r2, [r0] @ hardware version
240 mov ip, #0
241 mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
242 mcr p15, 0, ip, c7, c10, 4 @ data write barrier
243 .endm
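
The armv3_set_pte_ext macro above is the common Linux-to-hardware PTE translation that replaces the open-coded copies deleted from the ARM1020/1020e/1022/1026, ARM6/7, ARM720, ARM920/922/925/926, Feroceon and StrongARM hunks in this diff; callers whose write buffers must keep the bufferable bit pass wc_disable=0. A C model of what the macro computes may help when reading those hunks. This is a sketch only: the PTE_* values follow asm/pgtable-hwdef.h, while the L_PTE_* bit positions are placeholders standing in for the real definitions in asm/pgtable.h.

#include <stdint.h>

/* Placeholder L_PTE_* positions; the real ones live in asm/pgtable.h. */
#define L_PTE_PRESENT		(1u << 0)
#define L_PTE_YOUNG		(1u << 1)
#define L_PTE_USER		(1u << 4)
#define L_PTE_WRITE		(1u << 5)
#define L_PTE_DIRTY		(1u << 6)

/* Hardware small-page descriptor fields (per asm/pgtable-hwdef.h). */
#define PTE_TYPE_MASK		0x003u
#define PTE_TYPE_SMALL		0x002u
#define PTE_BUFFERABLE		0x004u
#define PTE_CACHEABLE		0x008u
#define PTE_SMALL_AP_MASK	0xff0u
#define PTE_SMALL_AP_UNO_SRW	0x550u	/* svc r/w, user no access */
#define PTE_SMALL_AP_URO_SRW	0xaa0u	/* svc r/w, user r/o */

/* C model of armv3_set_pte_ext: Linux PTE in, hardware PTE out. */
static uint32_t armv3_translate(uint32_t lpte, int wc_disable)
{
	/* The macro inverts P/Y/W/D once so later "all set?" tests
	 * become "any inverted bit set?" tests. */
	uint32_t inv = lpte ^ (L_PTE_PRESENT | L_PTE_YOUNG |
			       L_PTE_WRITE | L_PTE_DIRTY);
	uint32_t hw = lpte & ~(PTE_SMALL_AP_MASK | PTE_TYPE_MASK); /* keep C, B */

	hw |= PTE_TYPE_SMALL;			/* 4K small page */

	if (lpte & L_PTE_USER)			/* user page: user r/o... */
		hw |= PTE_SMALL_AP_URO_SRW;
	if (!(inv & (L_PTE_WRITE | L_PTE_DIRTY)))	/* writable and dirty */
		hw |= PTE_SMALL_AP_UNO_SRW;	/* ...ORed up to full r/w */

	if (inv & (L_PTE_PRESENT | L_PTE_YOUNG))	/* not present+young: */
		hw = 0;				/* force a translation fault */

	if (wc_disable && (hw & PTE_CACHEABLE))	/* CPU_DCACHE_WRITETHROUGH: */
		hw &= ~PTE_BUFFERABLE;		/* cacheable => write-through */

	return hw;
}

Not modelled above: the macro first stores the Linux view of the PTE and then writes the hardware word 2048 bytes below it (str r1, [r0], #-2048 followed by str r2, [r0]).
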
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index bbe10576c861..90a7e5279f29 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -17,7 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <asm/assembler.h> 18#include <asm/assembler.h>
19#include <asm/asm-offsets.h> 19#include <asm/asm-offsets.h>
20#include <asm/elf.h> 20#include <asm/hwcap.h>
21#include <mach/hardware.h> 21#include <mach/hardware.h>
22#include <asm/pgtable-hwdef.h> 22#include <asm/pgtable-hwdef.h>
23#include <asm/pgtable.h> 23#include <asm/pgtable.h>
@@ -153,24 +153,7 @@ ENTRY(cpu_sa110_switch_mm)
153 .align 5 153 .align 5
154ENTRY(cpu_sa110_set_pte_ext) 154ENTRY(cpu_sa110_set_pte_ext)
155#ifdef CONFIG_MMU 155#ifdef CONFIG_MMU
156 str r1, [r0], #-2048 @ linux version 156 armv3_set_pte_ext wc_disable=0
157
158 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
159
160 bic r2, r1, #PTE_SMALL_AP_MASK
161 bic r2, r2, #PTE_TYPE_MASK
162 orr r2, r2, #PTE_TYPE_SMALL
163
164 tst r1, #L_PTE_USER @ User?
165 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
166
167 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
168 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
169
170 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
171 movne r2, #0
172
173 str r2, [r0] @ hardware version
174 mov r0, r0 157 mov r0, r0
175 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 158 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
176 mcr p15, 0, r0, c7, c10, 4 @ drain WB 159 mcr p15, 0, r0, c7, c10, 4 @ drain WB
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 871ba018252e..451e2d953e2a 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -22,7 +22,7 @@
22#include <linux/init.h> 22#include <linux/init.h>
23#include <asm/assembler.h> 23#include <asm/assembler.h>
24#include <asm/asm-offsets.h> 24#include <asm/asm-offsets.h>
25#include <asm/elf.h> 25#include <asm/hwcap.h>
26#include <mach/hardware.h> 26#include <mach/hardware.h>
27#include <asm/pgtable-hwdef.h> 27#include <asm/pgtable-hwdef.h>
28#include <asm/pgtable.h> 28#include <asm/pgtable.h>
@@ -166,24 +166,7 @@ ENTRY(cpu_sa1100_switch_mm)
166 .align 5 166 .align 5
167ENTRY(cpu_sa1100_set_pte_ext) 167ENTRY(cpu_sa1100_set_pte_ext)
168#ifdef CONFIG_MMU 168#ifdef CONFIG_MMU
169 str r1, [r0], #-2048 @ linux version 169 armv3_set_pte_ext wc_disable=0
170
171 eor r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
172
173 bic r2, r1, #PTE_SMALL_AP_MASK
174 bic r2, r2, #PTE_TYPE_MASK
175 orr r2, r2, #PTE_TYPE_SMALL
176
177 tst r1, #L_PTE_USER @ User?
178 orrne r2, r2, #PTE_SMALL_AP_URO_SRW
179
180 tst r1, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
181 orreq r2, r2, #PTE_SMALL_AP_UNO_SRW
182
183 tst r1, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
184 movne r2, #0
185
186 str r2, [r0] @ hardware version
187 mov r0, r0 170 mov r0, r0
188 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 171 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
189 mcr p15, 0, r0, c7, c10, 4 @ drain WB 172 mcr p15, 0, r0, c7, c10, 4 @ drain WB
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 5702ec58b2a2..294943b85973 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -13,7 +13,7 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
16#include <asm/elf.h> 16#include <asm/hwcap.h>
17#include <asm/pgtable-hwdef.h> 17#include <asm/pgtable-hwdef.h>
18#include <asm/pgtable.h> 18#include <asm/pgtable.h>
19 19
@@ -114,46 +114,12 @@ ENTRY(cpu_v6_switch_mm)
114 * (hardware version is stored at -1024 bytes) 114 * (hardware version is stored at -1024 bytes)
115 * - pte - PTE value to store 115 * - pte - PTE value to store
116 * - ext - value for extended PTE bits 116 * - ext - value for extended PTE bits
117 *
118 * Permissions:
119 * YUWD APX AP1 AP0 SVC User
120 * 0xxx 0 0 0 no acc no acc
121 * 100x 1 0 1 r/o no acc
122 * 10x0 1 0 1 r/o no acc
123 * 1011 0 0 1 r/w no acc
124 * 110x 0 1 0 r/w r/o
125 * 11x0 0 1 0 r/w r/o
126 * 1111 0 1 1 r/w r/w
127 */ 117 */
118 armv6_mt_table cpu_v6
119
128ENTRY(cpu_v6_set_pte_ext) 120ENTRY(cpu_v6_set_pte_ext)
129#ifdef CONFIG_MMU 121#ifdef CONFIG_MMU
130 str r1, [r0], #-2048 @ linux version 122 armv6_set_pte_ext cpu_v6
131
132 bic r3, r1, #0x000003f0
133 bic r3, r3, #0x00000003
134 orr r3, r3, r2
135 orr r3, r3, #PTE_EXT_AP0 | 2
136
137 tst r1, #L_PTE_WRITE
138 tstne r1, #L_PTE_DIRTY
139 orreq r3, r3, #PTE_EXT_APX
140
141 tst r1, #L_PTE_USER
142 orrne r3, r3, #PTE_EXT_AP1
143 tstne r3, #PTE_EXT_APX
144 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
145
146 tst r1, #L_PTE_YOUNG
147 biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
148
149 tst r1, #L_PTE_EXEC
150 orreq r3, r3, #PTE_EXT_XN
151
152 tst r1, #L_PTE_PRESENT
153 moveq r3, #0
154
155 str r3, [r0]
156 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
157#endif 123#endif
158 mov pc, lr 124 mov pc, lr
159 125
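
cpu_v6_set_pte_ext now instantiates armv6_mt_table and armv6_set_pte_ext from proc-macros.S instead of carrying a private copy, and the old separate YOUNG (strip AP bits) and PRESENT (zero entry) tests are folded into a single young-and-present check that zeroes the entry. The same sketch treatment for the v6 path, under the same caveats (placeholder L_PTE_* positions; mt_table[] holds the C/B/TEX words listed in the armv6_mt_table macro):

#include <stdint.h>

/* Placeholders for the asm/pgtable.h definitions. */
#define L_PTE_PRESENT	(1u << 0)
#define L_PTE_YOUNG	(1u << 1)
#define L_PTE_MT_MASK	(0xfu << 2)	/* 4-bit memory-type index */
#define L_PTE_USER	(1u << 4)
#define L_PTE_WRITE	(1u << 5)
#define L_PTE_DIRTY	(1u << 6)
#define L_PTE_EXEC	(1u << 9)

/* ARMv6 extended small-page descriptor bits (asm/pgtable-hwdef.h). */
#define PTE_TYPE_MASK	0x003u
#define PTE_EXT_XN	(1u << 0)
#define PTE_EXT_AP0	(1u << 4)
#define PTE_EXT_AP1	(1u << 5)
#define PTE_EXT_APX	(1u << 9)

/* C model of armv6_set_pte_ext; ext is the extra-bits argument (r2). */
static uint32_t armv6_translate(uint32_t lpte, uint32_t ext,
				const uint32_t mt_table[16])
{
	uint32_t hw = (lpte & ~0x3fcu & ~PTE_TYPE_MASK)	/* keep pfn, shared */
		    | ext | PTE_EXT_AP0 | 2;		/* small page type */

	if (!((lpte & L_PTE_WRITE) && (lpte & L_PTE_DIRTY)))
		hw |= PTE_EXT_APX;		/* clean or r/o: read-only */

	if (lpte & L_PTE_USER) {
		hw |= PTE_EXT_AP1;
		if (hw & PTE_EXT_APX)		/* user r/o is encoded as */
			hw &= ~(PTE_EXT_APX | PTE_EXT_AP0);	/* AP1 alone */
	}

	if (!(lpte & L_PTE_EXEC))
		hw |= PTE_EXT_XN;

	hw |= mt_table[(lpte & L_PTE_MT_MASK) >> 2];	/* C/B/TEX bits */

	if (!((lpte & L_PTE_YOUNG) && (lpte & L_PTE_PRESENT)))
		hw = 0;			/* fault, so the access is noticed */

	return hw;
}
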
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b49f9a4c82c8..34e424041927 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -12,7 +12,7 @@
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
15#include <asm/elf.h> 15#include <asm/hwcap.h>
16#include <asm/pgtable-hwdef.h> 16#include <asm/pgtable-hwdef.h>
17#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18 18
@@ -25,9 +25,11 @@
25 25
26ENTRY(cpu_v7_proc_init) 26ENTRY(cpu_v7_proc_init)
27 mov pc, lr 27 mov pc, lr
28ENDPROC(cpu_v7_proc_init)
28 29
29ENTRY(cpu_v7_proc_fin) 30ENTRY(cpu_v7_proc_fin)
30 mov pc, lr 31 mov pc, lr
32ENDPROC(cpu_v7_proc_fin)
31 33
32/* 34/*
33 * cpu_v7_reset(loc) 35 * cpu_v7_reset(loc)
@@ -43,6 +45,7 @@ ENTRY(cpu_v7_proc_fin)
43 .align 5 45 .align 5
44ENTRY(cpu_v7_reset) 46ENTRY(cpu_v7_reset)
45 mov pc, r0 47 mov pc, r0
48ENDPROC(cpu_v7_reset)
46 49
47/* 50/*
48 * cpu_v7_do_idle() 51 * cpu_v7_do_idle()
@@ -52,8 +55,9 @@ ENTRY(cpu_v7_reset)
52 * IRQs are already disabled. 55 * IRQs are already disabled.
53 */ 56 */
54ENTRY(cpu_v7_do_idle) 57ENTRY(cpu_v7_do_idle)
55 .long 0xe320f003 @ ARM V7 WFI instruction 58 wfi
56 mov pc, lr 59 mov pc, lr
60ENDPROC(cpu_v7_do_idle)
57 61
58ENTRY(cpu_v7_dcache_clean_area) 62ENTRY(cpu_v7_dcache_clean_area)
59#ifndef TLB_CAN_READ_FROM_L1_CACHE 63#ifndef TLB_CAN_READ_FROM_L1_CACHE
@@ -65,6 +69,7 @@ ENTRY(cpu_v7_dcache_clean_area)
65 dsb 69 dsb
66#endif 70#endif
67 mov pc, lr 71 mov pc, lr
72ENDPROC(cpu_v7_dcache_clean_area)
68 73
69/* 74/*
70 * cpu_v7_switch_mm(pgd_phys, tsk) 75 * cpu_v7_switch_mm(pgd_phys, tsk)
@@ -89,6 +94,7 @@ ENTRY(cpu_v7_switch_mm)
89 isb 94 isb
90#endif 95#endif
91 mov pc, lr 96 mov pc, lr
97ENDPROC(cpu_v7_switch_mm)
92 98
93/* 99/*
94 * cpu_v7_set_pte_ext(ptep, pte) 100 * cpu_v7_set_pte_ext(ptep, pte)
@@ -99,26 +105,19 @@ ENTRY(cpu_v7_switch_mm)
99 * (hardware version is stored at -1024 bytes) 105 * (hardware version is stored at -1024 bytes)
100 * - pte - PTE value to store 106 * - pte - PTE value to store
101 * - ext - value for extended PTE bits 107 * - ext - value for extended PTE bits
102 *
103 * Permissions:
104 * YUWD APX AP1 AP0 SVC User
105 * 0xxx 0 0 0 no acc no acc
106 * 100x 1 0 1 r/o no acc
107 * 10x0 1 0 1 r/o no acc
108 * 1011 0 0 1 r/w no acc
109 * 110x 0 1 0 r/w r/o
110 * 11x0 0 1 0 r/w r/o
111 * 1111 0 1 1 r/w r/w
112 */ 108 */
113ENTRY(cpu_v7_set_pte_ext) 109ENTRY(cpu_v7_set_pte_ext)
114#ifdef CONFIG_MMU 110#ifdef CONFIG_MMU
115 str r1, [r0], #-2048 @ linux version 111 str r1, [r0], #-2048 @ linux version
116 112
117 bic r3, r1, #0x000003f0 113 bic r3, r1, #0x000003f0
118 bic r3, r3, #0x00000003 114 bic r3, r3, #PTE_TYPE_MASK
119 orr r3, r3, r2 115 orr r3, r3, r2
120 orr r3, r3, #PTE_EXT_AP0 | 2 116 orr r3, r3, #PTE_EXT_AP0 | 2
121 117
118 tst r2, #1 << 4
119 orrne r3, r3, #PTE_EXT_TEX(1)
120
122 tst r1, #L_PTE_WRITE 121 tst r1, #L_PTE_WRITE
123 tstne r1, #L_PTE_DIRTY 122 tstne r1, #L_PTE_DIRTY
124 orreq r3, r3, #PTE_EXT_APX 123 orreq r3, r3, #PTE_EXT_APX
@@ -128,19 +127,18 @@ ENTRY(cpu_v7_set_pte_ext)
128 tstne r3, #PTE_EXT_APX 127 tstne r3, #PTE_EXT_APX
129 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 128 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
130 129
131 tst r1, #L_PTE_YOUNG
132 biceq r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK
133
134 tst r1, #L_PTE_EXEC 130 tst r1, #L_PTE_EXEC
135 orreq r3, r3, #PTE_EXT_XN 131 orreq r3, r3, #PTE_EXT_XN
136 132
137 tst r1, #L_PTE_PRESENT 133 tst r1, #L_PTE_YOUNG
134 tstne r1, #L_PTE_PRESENT
138 moveq r3, #0 135 moveq r3, #0
139 136
140 str r3, [r0] 137 str r3, [r0]
141 mcr p15, 0, r0, c7, c10, 1 @ flush_pte 138 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
142#endif 139#endif
143 mov pc, lr 140 mov pc, lr
141ENDPROC(cpu_v7_set_pte_ext)
144 142
145cpu_v7_name: 143cpu_v7_name:
146 .ascii "ARMv7 Processor" 144 .ascii "ARMv7 Processor"
@@ -182,12 +180,17 @@ __v7_setup:
182 mov r10, #0x1f @ domains 0, 1 = manager 180 mov r10, #0x1f @ domains 0, 1 = manager
183 mcr p15, 0, r10, c3, c0, 0 @ load domain access register 181 mcr p15, 0, r10, c3, c0, 0 @ load domain access register
184#endif 182#endif
183 ldr r5, =0x40e040e0
184 ldr r6, =0xff0aa1a8
185 mcr p15, 0, r5, c10, c2, 0 @ write PRRR
186 mcr p15, 0, r6, c10, c2, 1 @ write NMRR
185 adr r5, v7_crval 187 adr r5, v7_crval
186 ldmia r5, {r5, r6} 188 ldmia r5, {r5, r6}
187 mrc p15, 0, r0, c1, c0, 0 @ read control register 189 mrc p15, 0, r0, c1, c0, 0 @ read control register
 188 bic r0, r0, r5 @ clear the bits 190 bic r0, r0, r5 @ clear the bits
189 orr r0, r0, r6 @ set them 191 orr r0, r0, r6 @ set them
190 mov pc, lr @ return to head.S:__ret 192 mov pc, lr @ return to head.S:__ret
193ENDPROC(__v7_setup)
191 194
192 /* 195 /*
193 * V X F I D LR 196 * V X F I D LR
@@ -197,7 +200,7 @@ __v7_setup:
197 */ 200 */
198 .type v7_crval, #object 201 .type v7_crval, #object
199v7_crval: 202v7_crval:
200 crval clear=0x0120c302, mmuset=0x00c0387d, ucset=0x00c0187c 203 crval clear=0x0120c302, mmuset=0x10c0387d, ucset=0x00c0187c
201 204
202__v7_setup_stack: 205__v7_setup_stack:
203 .space 4 * 11 @ 11 registers 206 .space 4 * 11 @ 11 registers
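
In proc-v7.S, besides the ENDPROC annotations and the architectural wfi replacing a hand-assembled opcode, __v7_setup now programs the memory remap registers PRRR and NMRR (CP15 c10, c2, 0 and 1), and the v7_crval change turns on bit 28 of the control register, SCTLR.TRE, without which those remap values are ignored. cpu_v7_set_pte_ext likewise folds the young/present tests together and propagates bit 4 of the ext argument into a TEX bit. A trivial self-check of the crval arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t old_mmuset = 0x00c0387d;	/* previous v7_crval mmuset */
	uint32_t new_mmuset = 0x10c0387d;	/* value in this diff */

	/* The only new bit is bit 28: SCTLR.TRE, enabling TEX remap so
	 * the PRRR/NMRR values (0x40e040e0 / 0xff0aa1a8) take effect. */
	assert((new_mmuset ^ old_mmuset) == (1u << 28));
	return 0;
}
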
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 7bd9e7197f60..04dc8b65401b 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -27,7 +27,7 @@
27#include <linux/linkage.h> 27#include <linux/linkage.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <asm/assembler.h> 29#include <asm/assembler.h>
30#include <asm/elf.h> 30#include <asm/hwcap.h>
31#include <mach/hardware.h> 31#include <mach/hardware.h>
32#include <asm/pgtable.h> 32#include <asm/pgtable.h>
33#include <asm/pgtable-hwdef.h> 33#include <asm/pgtable-hwdef.h>
@@ -345,38 +345,38 @@ ENTRY(cpu_xsc3_switch_mm)
345 * cpu_xsc3_set_pte_ext(ptep, pte, ext) 345 * cpu_xsc3_set_pte_ext(ptep, pte, ext)
346 * 346 *
347 * Set a PTE and flush it out 347 * Set a PTE and flush it out
348 *
349 */ 348 */
349cpu_xsc3_mt_table:
350 .long 0x00 @ L_PTE_MT_UNCACHED
351 .long PTE_EXT_TEX(1) @ L_PTE_MT_BUFFERABLE
352 .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
353 .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
354 .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
355 .long 0x00 @ unused
356 .long 0x00 @ L_PTE_MT_MINICACHE (not present)
357 .long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?)
358 .long 0x00 @ unused
359 .long PTE_EXT_TEX(1) @ L_PTE_MT_DEV_WC
360 .long 0x00 @ unused
361 .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED
362 .long PTE_EXT_TEX(2) @ L_PTE_MT_DEV_NONSHARED
363 .long 0x00 @ unused
364 .long 0x00 @ unused
365 .long 0x00 @ unused
366
350 .align 5 367 .align 5
351ENTRY(cpu_xsc3_set_pte_ext) 368ENTRY(cpu_xsc3_set_pte_ext)
352 str r1, [r0], #-2048 @ linux version 369 xscale_set_pte_ext_prologue
353 370
354 bic r2, r1, #0xff0 @ keep C, B bits
355 orr r2, r2, #PTE_TYPE_EXT @ extended page
356 tst r1, #L_PTE_SHARED @ shared? 371 tst r1, #L_PTE_SHARED @ shared?
357 orrne r2, r2, #0x200 372 and r1, r1, #L_PTE_MT_MASK
358 373 adr ip, cpu_xsc3_mt_table
359 eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY 374 ldr ip, [ip, r1]
360 375 orrne r2, r2, #PTE_EXT_COHERENT @ interlock: mask in coherent bit
361 tst r3, #L_PTE_USER @ user? 376 bic r2, r2, #0x0c @ clear old C,B bits
362 orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w 377 orr r2, r2, ip
363 378
364 tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty? 379 xscale_set_pte_ext_epilogue
365 orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
366 @ combined with user -> user r/w
367
368 @ If it's cacheable, it needs to be in L2 also.
369 eor ip, r1, #L_PTE_CACHEABLE
370 tst ip, #L_PTE_CACHEABLE
371 orreq r2, r2, #PTE_EXT_TEX(0x5)
372
373 tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young?
374 movne r2, #0 @ no -> fault
375
376 str r2, [r0] @ hardware version
377 mov ip, #0
378 mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
379 mcr p15, 0, ip, c7, c10, 4 @ data write barrier
380 mov pc, lr 380 mov pc, lr
381 381
382 .ltorg 382 .ltorg
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 2dd85273976f..0cce37b93937 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -23,7 +23,7 @@
23#include <linux/linkage.h> 23#include <linux/linkage.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <asm/assembler.h> 25#include <asm/assembler.h>
26#include <asm/elf.h> 26#include <asm/hwcap.h>
27#include <asm/pgtable.h> 27#include <asm/pgtable.h>
28#include <asm/pgtable-hwdef.h> 28#include <asm/pgtable-hwdef.h>
29#include <asm/page.h> 29#include <asm/page.h>
@@ -406,8 +406,6 @@ ENTRY(cpu_xscale_dcache_clean_area)
406 406
407/* =============================== PageTable ============================== */ 407/* =============================== PageTable ============================== */
408 408
409#define PTE_CACHE_WRITE_ALLOCATE 0
410
411/* 409/*
412 * cpu_xscale_switch_mm(pgd) 410 * cpu_xscale_switch_mm(pgd)
413 * 411 *
@@ -431,56 +429,42 @@ ENTRY(cpu_xscale_switch_mm)
431 * 429 *
432 * Errata 40: must set memory to write-through for user read-only pages. 430 * Errata 40: must set memory to write-through for user read-only pages.
433 */ 431 */
432cpu_xscale_mt_table:
433 .long 0x00 @ L_PTE_MT_UNCACHED
434 .long PTE_BUFFERABLE @ L_PTE_MT_BUFFERABLE
435 .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
436 .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
437 .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
438 .long 0x00 @ unused
439 .long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE
440 .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
441 .long 0x00 @ unused
442 .long PTE_BUFFERABLE @ L_PTE_MT_DEV_WC
443 .long 0x00 @ unused
444 .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED
445 .long 0x00 @ L_PTE_MT_DEV_NONSHARED
446 .long 0x00 @ unused
447 .long 0x00 @ unused
448 .long 0x00 @ unused
449
434 .align 5 450 .align 5
435ENTRY(cpu_xscale_set_pte_ext) 451ENTRY(cpu_xscale_set_pte_ext)
436 str r1, [r0], #-2048 @ linux version 452 xscale_set_pte_ext_prologue
437
438 bic r2, r1, #0xff0
439 orr r2, r2, #PTE_TYPE_EXT @ extended page
440
441 eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
442
443 tst r3, #L_PTE_USER @ User?
444 orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
445
446 tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
447 orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
448 @ combined with user -> user r/w
449
450 @
451 @ Handle the X bit. We want to set this bit for the minicache
452 @ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
453 @ and we have a writeable, cacheable region. If we ignore the
454 @ U and E bits, we can allow user space to use the minicache as
455 @ well.
456 @
457 @ X = (C & ~W & ~B) | (C & W & B & write_allocate)
458 @
459 eor ip, r1, #L_PTE_CACHEABLE
460 tst ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
461#if PTE_CACHE_WRITE_ALLOCATE
462 eorne ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
463 tstne ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
464#endif
465 orreq r2, r2, #PTE_EXT_TEX(1)
466 453
467 @ 454 @
468 @ Erratum 40: The B bit must be cleared for a user read-only 455 @ Erratum 40: must set memory to write-through for user read-only pages
469 @ cacheable page.
470 @
471 @ B = B & ~(U & C & ~W)
472 @ 456 @
473 and ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE 457 and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2)
474 teq ip, #L_PTE_USER | L_PTE_CACHEABLE 458 teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER
475 biceq r2, r2, #PTE_BUFFERABLE
476 459
477 tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young? 460 moveq r1, #L_PTE_MT_WRITETHROUGH
478 movne r2, #0 @ no -> fault 461 and r1, r1, #L_PTE_MT_MASK
462 adr ip, cpu_xscale_mt_table
463 ldr ip, [ip, r1]
464 bic r2, r2, #0x0c
465 orr r2, r2, ip
479 466
480 str r2, [r0] @ hardware version 467 xscale_set_pte_ext_epilogue
481 mov ip, #0
482 mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
483 mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
484 mov pc, lr 468 mov pc, lr
485 469
486 470
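
Both XScale families now share xscale_set_pte_ext_prologue/_epilogue from proc-macros.S, splicing the C/B/TEX bits in from a per-CPU memory-type table between the two halves; xsc3 additionally ORs in PTE_EXT_COHERENT for shared pages. Sketched in C under the same placeholder caveats, with memory-type indices following the order of the tables above:

#include <stdint.h>

/* Placeholders for asm/pgtable.h; indices match the .long tables above. */
#define L_PTE_PRESENT		(1u << 0)
#define L_PTE_YOUNG		(1u << 1)
#define L_PTE_MT_MASK		(0xfu << 2)
#define L_PTE_MT_WRITETHROUGH	(2u << 2)
#define L_PTE_MT_WRITEBACK	(3u << 2)
#define L_PTE_USER		(1u << 4)
#define L_PTE_WRITE		(1u << 5)
#define L_PTE_DIRTY		(1u << 6)

/* Extended small-page descriptor bits (asm/pgtable-hwdef.h). */
#define PTE_TYPE_EXT		0x003u
#define PTE_SMALL_AP_MASK	0xff0u
#define PTE_EXT_AP_UNO_SRW	(1u << 4)	/* svc r/w, user no access */
#define PTE_EXT_AP_URO_SRW	(2u << 4)	/* svc r/w, user r/o */

/* C model of the shared XScale set_pte_ext path.  Erratum 40: user
 * read-only pages must not be write-back cacheable, so write-back (or
 * write-alloc, whose index differs only in the 4 << 2 bit) is degraded
 * to write-through before the table lookup. */
static uint32_t xscale_translate(uint32_t lpte, const uint32_t mt_table[16])
{
	uint32_t mt = lpte & L_PTE_MT_MASK;
	uint32_t hw = (lpte & ~PTE_SMALL_AP_MASK) | PTE_TYPE_EXT;

	if (lpte & L_PTE_USER)
		hw |= PTE_EXT_AP_URO_SRW;	/* user r/o, svc r/w */
	if ((lpte & (L_PTE_WRITE | L_PTE_DIRTY)) ==
	    (L_PTE_WRITE | L_PTE_DIRTY))
		hw |= PTE_EXT_AP_UNO_SRW;	/* ORed up to user r/w */

	if ((lpte & (L_PTE_USER | L_PTE_WRITE)) == L_PTE_USER &&
	    (mt & ~(4u << 2)) == L_PTE_MT_WRITEBACK)
		mt = L_PTE_MT_WRITETHROUGH;	/* erratum 40 */

	hw = (hw & ~0x0cu) | mt_table[mt >> 2];	/* splice in C/B (and TEX) */

	if ((lpte & (L_PTE_PRESENT | L_PTE_YOUNG)) !=
	    (L_PTE_PRESENT | L_PTE_YOUNG))
		hw = 0;				/* not present+young: fault */

	return hw;
}

The epilogue's L1 line clean and write-buffer drain are left out of the sketch.
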
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index b56dda8052f7..24ba5109f2e7 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -51,6 +51,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
51 mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB 51 mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
52 dsb 52 dsb
53 mov pc, lr 53 mov pc, lr
54ENDPROC(v7wbi_flush_user_tlb_range)
54 55
55/* 56/*
56 * v7wbi_flush_kern_tlb_range(start,end) 57 * v7wbi_flush_kern_tlb_range(start,end)
@@ -77,6 +78,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
77 dsb 78 dsb
78 isb 79 isb
79 mov pc, lr 80 mov pc, lr
81ENDPROC(v7wbi_flush_kern_tlb_range)
80 82
81 .section ".text.init", #alloc, #execinstr 83 .section ".text.init", #alloc, #execinstr
82 84
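
Finally, the ENDPROC() markers added to proc-v7.S and tlb-v7.S give each ENTRY() a matching ELF function type and size, so kallsyms and the unwinder know where these routines end. Roughly what the pair expands to, paraphrased from the ARM linkage headers (treat the exact spelling as illustrative, not the verbatim in-tree text):

/* Paraphrase of the linkage helpers behind ENTRY()/ENDPROC(). */
#define END(name) \
	.size name, .-name		/* record where the symbol ends */

#define ENDPROC(name) \
	.type name, %function; \
	END(name)			/* mark as code, then size it */
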