Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-v6.S         30
-rw-r--r--  arch/arm/mm/cache-v7.S         30
-rw-r--r--  arch/arm/mm/copypage-v4mc.c     2
-rw-r--r--  arch/arm/mm/copypage-v6.c       2
-rw-r--r--  arch/arm/mm/copypage-xscale.c   2
-rw-r--r--  arch/arm/mm/dma-mapping.c       6
-rw-r--r--  arch/arm/mm/fault-armv.c        8
-rw-r--r--  arch/arm/mm/fault.c            13
-rw-r--r--  arch/arm/mm/flush.c            69
-rw-r--r--  arch/arm/mm/init.c             41
-rw-r--r--  arch/arm/mm/ioremap.c           8
-rw-r--r--  arch/arm/mm/mmap.c             22
-rw-r--r--  arch/arm/mm/mmu.c              50
-rw-r--r--  arch/arm/mm/proc-arm1020.S      2
-rw-r--r--  arch/arm/mm/proc-arm1020e.S     2
-rw-r--r--  arch/arm/mm/proc-arm1022.S      2
-rw-r--r--  arch/arm/mm/proc-arm1026.S      2
-rw-r--r--  arch/arm/mm/proc-arm6_7.S       2
-rw-r--r--  arch/arm/mm/proc-arm720.S       2
-rw-r--r--  arch/arm/mm/proc-arm740.S       2
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S     2
-rw-r--r--  arch/arm/mm/proc-arm920.S       2
-rw-r--r--  arch/arm/mm/proc-arm922.S       2
-rw-r--r--  arch/arm/mm/proc-arm925.S       2
-rw-r--r--  arch/arm/mm/proc-arm926.S       2
-rw-r--r--  arch/arm/mm/proc-arm940.S       2
-rw-r--r--  arch/arm/mm/proc-arm946.S       2
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S     2
-rw-r--r--  arch/arm/mm/proc-fa526.S        2
-rw-r--r--  arch/arm/mm/proc-feroceon.S     2
-rw-r--r--  arch/arm/mm/proc-mohawk.S       2
-rw-r--r--  arch/arm/mm/proc-sa110.S        2
-rw-r--r--  arch/arm/mm/proc-sa1100.S       2
-rw-r--r--  arch/arm/mm/proc-v6.S          49
-rw-r--r--  arch/arm/mm/proc-v7.S          57
-rw-r--r--  arch/arm/mm/proc-xsc3.S         2
-rw-r--r--  arch/arm/mm/proc-xscale.S       2
-rw-r--r--  arch/arm/mm/tlb-v7.S           33
38 files changed, 295 insertions, 171 deletions
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 86aa689ef1aa..99fa688dfadd 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -21,18 +21,22 @@
 #define D_CACHE_LINE_SIZE	32
 #define BTB_FLUSH_SIZE	8
 
-#ifdef CONFIG_ARM_ERRATA_411920
 /*
- * Invalidate the entire I cache (this code is a workaround for the ARM1136
- * erratum 411920 - Invalidate Instruction Cache operation can fail. This
- * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore.
+ * v6_flush_icache_all()
+ *
+ * Flush the whole I-cache.
+ *
+ * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
+ * This erratum is present in 1136, 1156 and 1176. It does not affect the
+ * MPCore.
  *
  * Registers:
  *	r0 - set to 0
  *	r1 - corrupted
  */
-ENTRY(v6_icache_inval_all)
+ENTRY(v6_flush_icache_all)
 	mov	r0, #0
+#ifdef CONFIG_ARM_ERRATA_411920
 	mrs	r1, cpsr
 	cpsid	ifa				@ disable interrupts
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
@@ -43,8 +47,11 @@ ENTRY(v6_icache_inval_all)
 	.rept	11				@ ARM Ltd recommends at least
 	nop					@ 11 NOPs
 	.endr
-	mov	pc, lr
+#else
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
 #endif
+	mov	pc, lr
+ENDPROC(v6_flush_icache_all)
 
 /*
  * v6_flush_cache_all()
@@ -60,7 +67,7 @@ ENTRY(v6_flush_kern_cache_all)
 #ifndef CONFIG_ARM_ERRATA_411920
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
 #else
-	b	v6_icache_inval_all
+	b	v6_flush_icache_all
 #endif
 #else
 	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
@@ -138,7 +145,7 @@ ENTRY(v6_coherent_user_range)
 #ifndef CONFIG_ARM_ERRATA_411920
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
 #else
-	b	v6_icache_inval_all
+	b	v6_flush_icache_all
 #endif
 #else
 	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
@@ -312,6 +319,7 @@ ENDPROC(v6_dma_unmap_area)
 
 	.type	v6_cache_fns, #object
 ENTRY(v6_cache_fns)
+	.long	v6_flush_icache_all
 	.long	v6_flush_kern_cache_all
 	.long	v6_flush_user_cache_all
 	.long	v6_flush_user_cache_range
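The new first entry in v6_cache_fns is position-dependent: the table must mirror struct cpu_cache_fns, which gains flush_icache_all as its first member in this series. A sketch of the expected layout (abbreviated; assumes the 2.6.37-era arch/arm/include/asm/cacheflush.h):

	/* Sketch of the ops table this file populates; field order must
	 * match the .long entries above. */
	struct cpu_cache_fns {
		void (*flush_icache_all)(void);	/* new: matches the .long added above */
		void (*flush_kern_all)(void);
		void (*flush_user_all)(void);
		void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
		void (*coherent_kern_range)(unsigned long, unsigned long);
		void (*coherent_user_range)(unsigned long, unsigned long);
		/* ... flush_kern_dcache_area and the dma_* ops follow ... */
	};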
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 37c8157e116e..a3ebf7a4f49b 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -18,6 +18,21 @@
 #include "proc-macros.S"
 
 /*
+ * v7_flush_icache_all()
+ *
+ * Flush the whole I-cache.
+ *
+ * Registers:
+ *	r0 - set to 0
+ */
+ENTRY(v7_flush_icache_all)
+	mov	r0, #0
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
+	mov	pc, lr
+ENDPROC(v7_flush_icache_all)
+
+/*
  * v7_flush_dcache_all()
  *
  * Flush the whole D-cache.
@@ -91,11 +106,8 @@ ENTRY(v7_flush_kern_cache_all)
 	THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bl	v7_flush_dcache_all
 	mov	r0, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c1, 0		@ invalidate I-cache inner shareable
-#else
-	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 	ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 	THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	mov	pc, lr
@@ -171,11 +183,8 @@ ENTRY(v7_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c1, 6		@ invalidate BTB Inner Shareable
-#else
-	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
 	dsb
 	isb
 	mov	pc, lr
@@ -309,6 +318,7 @@ ENDPROC(v7_dma_unmap_area)
 
 	.type	v7_cache_fns, #object
 ENTRY(v7_cache_fns)
+	.long	v7_flush_icache_all
 	.long	v7_flush_kern_cache_all
 	.long	v7_flush_user_cache_all
 	.long	v7_flush_user_cache_range
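ALT_SMP()/ALT_UP() replace the compile-time CONFIG_SMP conditionals above with boot-time patching: the SMP instruction is assembled in place, and the UP alternative is recorded in a fixup section that is applied when an SMP kernel finds itself booting on a uniprocessor. A simplified sketch of the macros (assuming the CONFIG_SMP_ON_UP variant from arch/arm/include/asm/assembler.h; the plain SMP-only and UP-only expansions are omitted):

	#define ALT_SMP(instr...)				\
	9998:	instr				@ SMP version stays in the text

	#define ALT_UP(instr...)				\
		.pushsection ".alt.smp.init", "a"	;\
		.long	9998b			@ site to patch	;\
		instr				@ UP replacement	;\
		.popsection

Without CONFIG_SMP_ON_UP, ALT_SMP() expands to the bare instruction and ALT_UP() to nothing on SMP builds, and the other way around on UP builds.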
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 598c51ad5071..b8061519ce77 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index f55fa1044f72..bdba6c65c901 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	/* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 9920c0ae2096..649bbcd325bf 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
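All three aliasing copy-page implementations flip the lazy-flush flag the same way: PG_dcache_dirty ("this page needs a flush") becomes PG_dcache_clean ("this page has been flushed"), so new page cache pages are assumed dirty by default. The idiom flushes exactly once, on first use:

	/* old: flush only if someone flagged the page dirty */
	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* new: a page without PG_dcache_clean may be dirty; the first
	 * user flushes it and sets the bit in one atomic operation */
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);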
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4bc43e535d3b..e4dd0646e859 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -523,6 +523,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		outer_inv_range(paddr, paddr + size);
 
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906dec1ca1..8440d952ba6d 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -28,6 +28,7 @@
 
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
+#if __LINUX_ARM_ARCH__ < 6
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable.  However, we leave the write buffer on.
@@ -141,7 +142,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
  * a page table, or changing an existing PTE.  Basically, there are two
  * things that we need to take care of:
  *
- *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *  1. If PG_dcache_clean is not set for the page, we need to ensure
  *     that any cache entries for the kernels virtual memory
  *     range are written back to the page.
  *  2. If we have multiple shared mappings of the same space in
@@ -168,10 +169,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 		return;
 
 	mapping = page_mapping(page);
-#ifndef CONFIG_SMP
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
-#endif
 	if (mapping) {
 		if (cache_is_vivt())
 			make_coherent(mapping, vma, addr, ptep, pfn);
@@ -179,6 +178,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 		__flush_icache_all();
 	}
 }
+#endif	/* __LINUX_ARM_ARCH__ < 6 */
 
 /*
  * Check whether the write buffer has physical address aliasing
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 23b0b03af5ea..1e21e125fe3a 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -581,6 +581,19 @@ static struct fsr_info ifsr_info[] = {
 	{ do_bad,		SIGBUS,  0,	"unknown 31"		},
 };
 
+void __init
+hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+		 int sig, int code, const char *name)
+{
+	if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info))
+		BUG();
+
+	ifsr_info[nr].fn   = fn;
+	ifsr_info[nr].sig  = sig;
+	ifsr_info[nr].code = code;
+	ifsr_info[nr].name = name;
+}
+
 asmlinkage void __exception
 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
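hook_ifault_code() is the prefetch-abort counterpart of the existing hook_fault_code(), letting platform or debug code claim an IFSR status slot. A hypothetical caller (the handler, status number, and signal details below are invented for illustration):

	/* Hypothetical: route IFSR status 2 to a debug handler. */
	static int example_iabort(unsigned long addr, unsigned int ifsr,
				  struct pt_regs *regs)
	{
		/* handle the prefetch abort here; returning 0 means handled,
		 * non-zero lets do_PrefetchAbort() raise the signal */
		return 0;
	}

	static int __init example_init(void)
	{
		hook_ifault_code(2, example_iabort, SIGTRAP, TRAP_BRKPT,
				 "example debug event");
		return 0;
	}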
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6844cb9b508..391ffae75098 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
+#include <asm/smp_plat.h>
 
 #include "mm.h"
 
@@ -39,6 +40,18 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	    : "cc");
 }
 
+static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
+{
+	unsigned long colour = CACHE_COLOUR(vaddr);
+	unsigned long offset = vaddr & (PAGE_SIZE - 1);
+	unsigned long to;
+
+	set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
+	to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
+	flush_tlb_kernel_page(to);
+	flush_icache_range(to, to + len);
+}
+
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
@@ -89,16 +102,16 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
 		__flush_icache_all();
 }
+
 #else
 #define flush_pfn_alias(pfn,vaddr)		do { } while (0)
+#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
 #endif
 
-#ifdef CONFIG_SMP
 static void flush_ptrace_access_other(void *args)
 {
 	__flush_icache_all();
 }
-#endif
 
 static
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -118,15 +131,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		return;
 	}
 
-	/* VIPT non-aliasing cache */
+	/* VIPT non-aliasing D-cache */
 	if (vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-#ifdef CONFIG_SMP
+		if (icache_is_vipt_aliasing())
+			flush_icache_alias(page_to_pfn(page), uaddr, len);
+		else
+			__cpuc_coherent_kern_range(addr, addr + len);
 		if (cache_ops_need_broadcast())
 			smp_call_function(flush_ptrace_access_other,
 					  NULL, 1);
-#endif
 	}
 }
 
@@ -215,6 +229,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 	flush_dcache_mmap_unlock(mapping);
 }
 
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+	unsigned long pfn;
+	struct page *page;
+	struct address_space *mapping;
+
+	if (!pte_present_user(pteval))
+		return;
+	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+		/* only flush non-aliasing VIPT caches for exec mappings */
+		return;
+	pfn = pte_pfn(pteval);
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	if (cache_is_vipt_aliasing())
+		mapping = page_mapping(page);
+	else
+		mapping = NULL;
+
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		__flush_dcache_page(mapping, page);
+	/* pte_exec() already checked above for non-aliasing VIPT cache */
+	if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+		__flush_icache_all();
+}
+#endif
+
 /*
  * Ensure cache coherency between kernel mapping and userspace mapping
  * of this page.
@@ -246,17 +290,16 @@ void flush_dcache_page(struct page *page)
 
 	mapping = page_mapping(page);
 
-#ifndef CONFIG_SMP
-	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
-	else
-#endif
-	{
+	if (!cache_ops_need_broadcast() &&
+	    mapping && !mapping_mapped(mapping))
+		clear_bit(PG_dcache_clean, &page->flags);
+	else {
 		__flush_dcache_page(mapping, page);
 		if (mapping && cache_is_vivt())
 			__flush_dcache_aliases(mapping, page);
 		else if (mapping)
 			__flush_icache_all();
+		set_bit(PG_dcache_clean, &page->flags);
 	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
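With __sync_icache_dcache() in place, the lazy flush on ARMv6+ moves out of update_mmu_cache() (now guarded by __LINUX_ARM_ARCH__ < 6 in fault-armv.c above) and into the PTE setter itself. A sketch of the caller side, assuming the arch/arm/include/asm/pgtable.h wiring from this same series:

	#if __LINUX_ARM_ARCH__ < 6
	static inline void __sync_icache_dcache(pte_t pteval) { }
	#else
	extern void __sync_icache_dcache(pte_t);
	#endif

	static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep, pte_t pteval)
	{
		if (addr >= TASK_SIZE)
			set_pte_ext(ptep, pteval, 0);		/* kernel mapping */
		else {
			__sync_icache_dcache(pteval);		/* lazy D/I sync */
			set_pte_ext(ptep, pteval, PTE_EXT_NG);	/* user mapping */
		}
	}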
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7185b00650fe..7fd9b5eb177f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -150,6 +150,7 @@ static void __init find_limits(struct meminfo *mi,
 static void __init arm_bootmem_init(struct meminfo *mi,
 	unsigned long start_pfn, unsigned long end_pfn)
 {
+	struct memblock_region *reg;
 	unsigned int boot_pages;
 	phys_addr_t bitmap;
 	pg_data_t *pgdat;
@@ -180,13 +181,13 @@ static void __init arm_bootmem_init(struct meminfo *mi,
 	/*
 	 * Reserve the memblock reserved regions in bootmem.
 	 */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
-		if (start >= start_pfn &&
-		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
+	for_each_memblock(reserved, reg) {
+		phys_addr_t start = memblock_region_reserved_base_pfn(reg);
+		phys_addr_t end = memblock_region_reserved_end_pfn(reg);
+		if (start >= start_pfn && end <= end_pfn)
 			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
-					     memblock_size_bytes(&memblock.reserved, i),
+					     (end - start) << PAGE_SHIFT,
 					     BOOTMEM_DEFAULT);
 	}
 }
@@ -237,20 +238,7 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
 #ifndef CONFIG_SPARSEMEM
 int pfn_valid(unsigned long pfn)
 {
-	struct memblock_region *mem = &memblock.memory;
-	unsigned int left = 0, right = mem->cnt;
-
-	do {
-		unsigned int mid = (right + left) / 2;
-
-		if (pfn < memblock_start_pfn(mem, mid))
-			right = mid;
-		else if (pfn >= memblock_end_pfn(mem, mid))
-			left = mid + 1;
-		else
-			return 1;
-	} while (left < right);
-	return 0;
+	return memblock_is_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
 
@@ -260,10 +248,11 @@ static void arm_memory_present(void)
 #else
 static void arm_memory_present(void)
 {
-	int i;
-	for (i = 0; i < memblock.memory.cnt; i++)
-		memory_present(0, memblock_start_pfn(&memblock.memory, i),
-			       memblock_end_pfn(&memblock.memory, i));
+	struct memblock_region *reg;
+
+	for_each_memblock(memory, reg)
+		memory_present(0, memblock_region_memory_base_pfn(reg),
+			       memblock_region_memory_end_pfn(reg));
 }
 #endif
 
@@ -277,7 +266,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 
 	/* Register the kernel text, kernel data and initrd with memblock. */
 #ifdef CONFIG_XIP_KERNEL
-	memblock_reserve(__pa(_data), _end - _data);
+	memblock_reserve(__pa(_sdata), _end - _sdata);
 #else
 	memblock_reserve(__pa(_stext), _end - _stext);
 #endif
@@ -545,7 +534,7 @@ void __init mem_init(void)
 
 			MLK_ROUNDUP(__init_begin, __init_end),
 			MLK_ROUNDUP(_text, _etext),
-			MLK_ROUNDUP(_data, _edata));
+			MLK_ROUNDUP(_sdata, _edata));
 
 #undef MLK
 #undef MLM
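The open-coded walks over memblock.memory and memblock.reserved are replaced by the generic iterator plus pfn accessors. For reference, for_each_memblock() is roughly (a sketch of the <linux/memblock.h> definition this code now depends on):

	#define for_each_memblock(memblock_type, region)		\
		for (region = memblock.memblock_type.regions;		\
		     region < (memblock.memblock_type.regions +		\
			       memblock.memblock_type.cnt);		\
		     region++)

The memblock_region_memory_base_pfn()/..._end_pfn() helpers (and their reserved-region counterparts) just convert a region's base and base+size into page frame numbers.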
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab506272b2d3..17e7b0b57e49 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,8 +204,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
 	 */
-	if (WARN_ON(pfn_valid(pfn)))
-		return NULL;
+	if (pfn_valid(pfn)) {
+		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory.  This leads\n"
+		       KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
+		       KERN_WARNING "will fail in the next kernel release.  Please fix your driver.\n");
+		WARN_ON(1);
+	}
 
 	type = get_mem_type(mtype);
 	if (!type)
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 4f5b39687df5..b0a98305055c 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -144,3 +144,25 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 {
 	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
 }
+
+#ifdef CONFIG_STRICT_DEVMEM
+
+#include <linux/ioport.h>
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain
+ * address is valid. The argument is a physical page number.
+ * We mimic x86 here by disallowing access to system RAM as well as
+ * device-exclusive MMIO regions. This effectively disable read()/write()
+ * on /dev/mem.
+ */
+int devmem_is_allowed(unsigned long pfn)
+{
+	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+		return 0;
+	if (!page_is_ram(pfn))
+		return 1;
+	return 0;
+}
+
+#endif
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6a3a2d0cd6db..c32f731d56d3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -248,7 +248,7 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-			     L_PTE_USER | L_PTE_EXEC,
+			     L_PTE_WRITE | L_PTE_EXEC,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
@@ -259,7 +259,7 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_NONCACHED] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-			     L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+			     L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
@@ -310,9 +310,8 @@ static void __init build_mem_type_table(void)
 		cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-#ifdef CONFIG_SMP
+	if (is_smp())
 		cachepolicy = CPOLICY_WRITEALLOC;
-#endif
 
 	/*
 	 * Strip out features not present on earlier architectures.
@@ -406,13 +405,11 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
-#ifndef CONFIG_SMP
 	/*
 	 * Only use write-through for non-SMP systems
 	 */
-	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
 		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
@@ -436,22 +433,23 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-#ifdef CONFIG_SMP
-		/*
-		 * Mark memory with the "shared" attribute for SMP systems
-		 */
-		user_pgprot |= L_PTE_SHARED;
-		kern_pgprot |= L_PTE_SHARED;
-		vecs_pgprot |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
-#endif
+		if (is_smp()) {
+			/*
+			 * Mark memory with the "shared" attribute
+			 * for SMP systems
+			 */
+			user_pgprot |= L_PTE_SHARED;
+			kern_pgprot |= L_PTE_SHARED;
+			vecs_pgprot |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+		}
 	}
 
 	/*
@@ -829,8 +827,7 @@ static void __init sanity_check_meminfo(void)
 			 * rather difficult.
 			 */
 			reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
-		} else if (tlb_ops_need_broadcast()) {
+		} else if (is_smp() && tlb_ops_need_broadcast()) {
 			/*
 			 * kmap_high needs to occasionally flush TLB entries,
 			 * however, if the TLB entries need to be broadcast
@@ -840,7 +837,6 @@ static void __init sanity_check_meminfo(void)
 			 * (must not be called with irqs off)
 			 */
 			reason = "without hardware TLB ops broadcasting";
-#endif
 		}
 		if (reason) {
 			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
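Every CONFIG_SMP conditional in mmu.c above becomes a runtime is_smp() test, so a single kernel image can pick the right cache policy and shareability attributes on either kind of system. A sketch of is_smp() (assuming the <asm/smp_plat.h> form introduced alongside SMP_ON_UP; smp_on_up is a word fixed up during early boot):

	static inline bool is_smp(void)
	{
	#ifndef CONFIG_SMP
		return false;
	#elif defined(CONFIG_SMP_ON_UP)
		extern unsigned int smp_on_up;
		return !!smp_on_up;	/* patched during boot on uniprocessors */
	#else
		return true;
	#endif
	}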
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 203a4e944d9e..a6f5f8475b96 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -430,7 +430,7 @@ ENTRY(cpu_arm1020_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm1020_setup, #function
 __arm1020_setup:
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 1a511e765909..afc06b9c3133 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -412,7 +412,7 @@ ENTRY(cpu_arm1020e_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm1020e_setup, #function
 __arm1020e_setup:
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 1ffa4eb9c34f..8915e0ba3fe5 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -394,7 +394,7 @@ ENTRY(cpu_arm1022_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm1022_setup, #function
 __arm1022_setup:
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 5697c34b95b0..ff446c5d476f 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -384,7 +384,7 @@ ENTRY(cpu_arm1026_set_pte_ext)
 	mov	pc, lr
 
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm1026_setup, #function
 __arm1026_setup:
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 64e0b327c7c5..6a7be1863edd 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -238,7 +238,7 @@ ENTRY(cpu_arm7_reset)
 	mcr	p15, 0, r1, c1, c0, 0		@ turn off MMU etc
 	mov	pc, r0
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm6_setup, #function
 __arm6_setup:	mov	r0, #0
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 9d96824134fc..c285395f44b2 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -113,7 +113,7 @@ ENTRY(cpu_arm720_reset)
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm710_setup, #function
 __arm710_setup:
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 6c1a9ab059ae..38b27dcba727 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -55,7 +55,7 @@ ENTRY(cpu_arm740_reset)
 	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 	mov	pc, r0
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm740_setup, #function
 __arm740_setup:
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 6a850dbba22e..0c9786de20af 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -46,7 +46,7 @@ ENTRY(cpu_arm7tdmi_proc_fin)
 ENTRY(cpu_arm7tdmi_reset)
 	mov	pc, r0
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm7tdmi_setup, #function
 __arm7tdmi_setup:
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 86f80aa56216..fecf570939f3 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -375,7 +375,7 @@ ENTRY(cpu_arm920_set_pte_ext)
 #endif
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm920_setup, #function
 __arm920_setup:
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index f76ce9b62883..e3cbf87c9480 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -379,7 +379,7 @@ ENTRY(cpu_arm922_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm922_setup, #function
 __arm922_setup:
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 657bd3f7c153..572424c867b5 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -428,7 +428,7 @@ ENTRY(cpu_arm925_set_pte_ext)
 #endif /* CONFIG_MMU */
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm925_setup, #function
 __arm925_setup:
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 73f1f3c68910..63d168b4ebe6 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -389,7 +389,7 @@ ENTRY(cpu_arm926_set_pte_ext)
 #endif
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm926_setup, #function
 __arm926_setup:
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index fffb061a45a5..f6a62822418e 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -264,7 +264,7 @@ ENTRY(arm940_cache_fns)
 	.long	arm940_dma_unmap_area
 	.long	arm940_dma_flush_range
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm940_setup, #function
 __arm940_setup:
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 249a6053760a..ea2e7f2eb95b 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -317,7 +317,7 @@ ENTRY(cpu_arm946_dcache_clean_area)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm946_setup, #function
 __arm946_setup:
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index db475667fac2..db67e3134d7a 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -46,7 +46,7 @@ ENTRY(cpu_arm9tdmi_proc_fin)
 ENTRY(cpu_arm9tdmi_reset)
 	mov	pc, r0
 
-	__INIT
+	__CPUINIT
 
 	.type	__arm9tdmi_setup, #function
 __arm9tdmi_setup:
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 7803fdf70029..7c9ad621f0e6 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -134,7 +134,7 @@ ENTRY(cpu_fa526_set_pte_ext)
 #endif
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__fa526_setup, #function
 __fa526_setup:
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index b304d0104a4e..578da69200cf 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -494,7 +494,7 @@ ENTRY(cpu_feroceon_set_pte_ext)
 #endif
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__feroceon_setup, #function
 __feroceon_setup:
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 5f6892fcc167..4458ee6aa713 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -338,7 +338,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__mohawk_setup, #function
 __mohawk_setup:
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index a201eb04b5e1..5aa8d59c2e85 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -156,7 +156,7 @@ ENTRY(cpu_sa110_set_pte_ext)
 #endif
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__sa110_setup, #function
 __sa110_setup:
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 7ddc4805bf97..2ac4e6f10713 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -169,7 +169,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
 #endif
 	mov	pc, lr
 
-	__INIT
+	__CPUINIT
 
 	.type	__sa1100_setup, #function
 __sa1100_setup:
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 22aac8515196..59a7e1ffe7bc 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -30,13 +30,10 @@
 #define TTB_RGN_WT	(2 << 3)
 #define TTB_RGN_WB	(3 << 3)
 
-#ifndef CONFIG_SMP
-#define TTB_FLAGS	TTB_RGN_WBWA
-#define PMD_FLAGS	PMD_SECT_WB
-#else
-#define TTB_FLAGS	TTB_RGN_WBWA|TTB_S
-#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_UP	TTB_RGN_WBWA
+#define PMD_FLAGS_UP	PMD_SECT_WB
+#define TTB_FLAGS_SMP	TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
 
 ENTRY(cpu_v6_proc_init)
 	mov	pc, lr
@@ -97,7 +94,8 @@ ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_FLAGS
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -137,7 +135,7 @@ cpu_pj4_name:
 
 	.align
 
-	__INIT
+	__CPUINIT
 
 /*
  * __v6_setup
@@ -156,9 +154,11 @@ cpu_pj4_name:
  */
 __v6_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
+	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)	@ Enable SMP/nAMP mode
+	ALT_UP(nop)
 	orr	r0, r0, #0x20
-	mcr	p15, 0, r0, c1, c0, 1
+	ALT_SMP(mcr	p15, 0, r0, c1, c0, 1)
+	ALT_UP(nop)
 #endif
 
 	mov	r0, #0
@@ -169,7 +169,8 @@ __v6_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_FLAGS
+	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
@@ -192,6 +193,8 @@ __v6_setup:
 v6_crval:
 	crval	clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c
 
+	__INITDATA
+
 	.type	v6_processor_functions, #object
 ENTRY(v6_processor_functions)
 	.word	v6_early_abort
@@ -205,6 +208,8 @@ ENTRY(v6_processor_functions)
 	.word	cpu_v6_set_pte_ext
 	.size	v6_processor_functions, . - v6_processor_functions
 
+	.section ".rodata"
+
 	.type	cpu_arch_name, #object
 cpu_arch_name:
 	.asciz	"armv6"
@@ -225,10 +230,16 @@ cpu_elf_name:
 __v6_proc_info:
 	.long	0x0007b000
 	.long	0x0007f000
-	.long   PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
@@ -249,10 +260,16 @@ __v6_proc_info:
 __pj4_v6_proc_info:
 	.long	0x560f5810
 	.long	0xff0ffff0
-	.long   PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_UP)
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7563ff0141bd..53cbe2225153 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -30,15 +30,13 @@
 #define TTB_IRGN_WT	((1 << 0) | (0 << 6))
 #define TTB_IRGN_WB	((1 << 0) | (1 << 6))
 
-#ifndef CONFIG_SMP
 /* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS	TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS	PMD_SECT_WB
-#else
+#define TTB_FLAGS_UP	TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP	PMD_SECT_WB
+
 /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_SMP	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
 
 ENTRY(cpu_v7_proc_init)
 	mov	pc, lr
@@ -105,7 +103,8 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_FLAGS
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
@@ -169,7 +168,7 @@ cpu_v7_name:
 	.ascii	"ARMv7 Processor"
 	.align
 
-	__INIT
+	__CPUINIT
 
 /*
  * __v7_setup
@@ -188,7 +187,8 @@ cpu_v7_name:
  */
 __v7_ca9mp_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1
+	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
+	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
 	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
 	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
 	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
@@ -253,6 +253,14 @@ __v7_setup:
 	orreq	r10, r10, #1 << 22		@ set bit #22
 	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
 #endif
+#ifdef CONFIG_ARM_ERRATA_743622
+	teq	r6, #0x20			@ present in r2p0
+	teqne	r6, #0x21			@ present in r2p1
+	teqne	r6, #0x22			@ present in r2p2
+	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orreq	r10, r10, #1 << 6		@ set bit #6
+	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
 
 3:	mov	r10, #0
 #ifdef HARVARD_CACHE
@@ -262,7 +270,8 @@ __v7_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_FLAGS
+	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
@@ -324,6 +333,8 @@ v7_crval:
 __v7_setup_stack:
 	.space	4 * 11				@ 11 registers
 
+	__INITDATA
+
 	.type	v7_processor_functions, #object
 ENTRY(v7_processor_functions)
 	.word	v7_early_abort
@@ -337,6 +348,8 @@ ENTRY(v7_processor_functions)
 	.word	cpu_v7_set_pte_ext
 	.size	v7_processor_functions, . - v7_processor_functions
 
+	.section ".rodata"
+
 	.type	cpu_arch_name, #object
 cpu_arch_name:
 	.asciz	"armv7"
@@ -354,10 +367,16 @@ cpu_elf_name:
 __v7_ca9mp_proc_info:
 	.long	0x410fc090	@ Required ID value
 	.long	0xff0ffff0	@ Mask for ID
-	.long   PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_UP)
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
@@ -365,7 +384,7 @@ __v7_ca9mp_proc_info:
 	b	__v7_ca9mp_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
 	.long	cpu_v7_name
 	.long	v7_processor_functions
 	.long	v7wbi_tlb_fns
@@ -380,10 +399,16 @@ __v7_ca9mp_proc_info:
 __v7_proc_info:
 	.long	0x000f0000		@ Required ID value
 	.long	0x000f0000		@ Mask for ID
-	.long   PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_UP)
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 361a51e49030..cad07e403044 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -404,7 +404,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
 
 	.align
 
-	__INIT
+	__CPUINIT
 
 	.type	__xsc3_setup, #function
 __xsc3_setup:
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 14075979bcba..cb245edb2c2b 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -506,7 +506,7 @@ ENTRY(cpu_xscale_set_pte_ext)
 
 	.align
 
-	__INIT
+	__CPUINIT
 
 	.type	__xscale_setup, #function
 __xscale_setup:
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index f3f288a9546d..53cd5b454673 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -13,6 +13,7 @@
  */
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
@@ -41,20 +42,15 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
-#else
-	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
+
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
 	mov	ip, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, ip, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
-#else
-	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
-#endif
+	ALT_SMP(mcr	p15, 0, ip, c7, c1, 6)	@ flush BTAC/BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, ip, c7, c5, 6)	@ flush BTAC/BTB
 	dsb
 	mov	pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
@@ -74,20 +70,14 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	mov	r0, r0, lsl #PAGE_SHIFT
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
-#else
-	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
 	mov	r2, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r2, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
-#else
-	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
-#endif
+	ALT_SMP(mcr	p15, 0, r2, c7, c1, 6)	@ flush BTAC/BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, r2, c7, c5, 6)	@ flush BTAC/BTB
 	dsb
 	isb
 	mov	pc, lr
@@ -99,5 +89,6 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
 ENTRY(v7wbi_tlb_fns)
 	.long	v7wbi_flush_user_tlb_range
 	.long	v7wbi_flush_kern_tlb_range
-	.long	v7wbi_tlb_flags
+	ALT_SMP(.long	v7wbi_tlb_flags_smp)
+	ALT_UP(.long	v7wbi_tlb_flags_up)
 	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns
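Note that ALT_SMP()/ALT_UP() work on data as well as instructions: v7wbi_tlb_fns now emits the SMP flags word inline and queues the UP word as a fixup. Conceptually, the boot-time pass walks the fixup section and overwrites each recorded word (C pseudocode; the real loop is assembly in arch/arm/kernel/head.S, and the names here are illustrative):

	struct smp_alt {
		unsigned long *addr;	/* word or insn emitted by ALT_SMP() */
		unsigned long up_val;	/* replacement recorded by ALT_UP() */
	};

	static void __init fixup_smp_on_up(struct smp_alt *alt, struct smp_alt *end)
	{
		for (; alt < end; alt++)	/* entries collected in .alt.smp.init */
			*alt->addr = alt->up_val;
	}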