Diffstat (limited to 'arch/arm/mm')

 arch/arm/mm/Kconfig           |  2
 arch/arm/mm/alignment.c       | 19
 arch/arm/mm/cache-v6.S        | 30
 arch/arm/mm/cache-v7.S        | 30
 arch/arm/mm/copypage-v4mc.c   |  2
 arch/arm/mm/copypage-v6.c     |  2
 arch/arm/mm/copypage-xscale.c |  2
 arch/arm/mm/dma-mapping.c     |  8
 arch/arm/mm/fault-armv.c      |  8
 arch/arm/mm/flush.c           | 69
 arch/arm/mm/init.c            |  4
 arch/arm/mm/mmu.c             | 73
 arch/arm/mm/proc-v6.S         | 43
 arch/arm/mm/proc-v7.S         | 99
 arch/arm/mm/tlb-v7.S          | 33
 15 files changed, 299 insertions(+), 125 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 33c3f570aaa0..a0a2928ae4dd 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -398,7 +398,7 @@ config CPU_V6
 # ARMv6k
 config CPU_32v6K
 	bool "Support ARM V6K processor extensions" if !SMP
-	depends on CPU_V6
+	depends on CPU_V6 || CPU_V7
 	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
 	help
 	  Say Y here if your ARMv6 processor supports the 'K' extension.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index d073b64ae87e..724ba3bce72c 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -885,8 +885,23 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	if (ai_usermode & UM_SIGNAL)
 		force_sig(SIGBUS, current);
-	else
-		set_cr(cr_no_alignment);
+	else {
+		/*
+		 * We're about to disable the alignment trap and return to
+		 * user space.  But if an interrupt occurs before actually
+		 * reaching user space, then the IRQ vector entry code will
+		 * notice that we were still in kernel space and therefore
+		 * the alignment trap won't be re-enabled in that case as it
+		 * is presumed to be always on from kernel space.
+		 * Let's prevent that race by disabling interrupts here (they
+		 * are disabled on the way back to user space anyway in
+		 * entry-common.S) and disable the alignment trap only if
+		 * there is no work pending for this thread.
+		 */
+		raw_local_irq_disable();
+		if (!(current_thread_info()->flags & _TIF_WORK_MASK))
+			set_cr(cr_no_alignment);
+	}
 
 	return 0;
 }
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 86aa689ef1aa..99fa688dfadd 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -21,18 +21,22 @@
 #define D_CACHE_LINE_SIZE	32
 #define BTB_FLUSH_SIZE	8
 
-#ifdef CONFIG_ARM_ERRATA_411920
 /*
- * Invalidate the entire I cache (this code is a workaround for the ARM1136
- * erratum 411920 - Invalidate Instruction Cache operation can fail. This
- * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore.
+ * v6_flush_icache_all()
+ *
+ * Flush the whole I-cache.
  *
+ * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
+ * This erratum is present in 1136, 1156 and 1176.  It does not affect the
+ * MPCore.
+ *
+ * Registers:
+ *	r0 - set to 0
+ *	r1 - corrupted
  */
-ENTRY(v6_icache_inval_all)
+ENTRY(v6_flush_icache_all)
 	mov	r0, #0
+#ifdef CONFIG_ARM_ERRATA_411920
 	mrs	r1, cpsr
 	cpsid	ifa				@ disable interrupts
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
@@ -43,8 +47,11 @@ ENTRY(v6_icache_inval_all)
 	.rept	11				@ ARM Ltd recommends at least
 	nop					@ 11 NOPs
 	.endr
-	mov	pc, lr
+#else
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
 #endif
+	mov	pc, lr
+ENDPROC(v6_flush_icache_all)
 
 /*
  * v6_flush_cache_all()
@@ -60,7 +67,7 @@ ENTRY(v6_flush_kern_cache_all)
 #ifndef CONFIG_ARM_ERRATA_411920
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
 #else
-	b	v6_icache_inval_all
+	b	v6_flush_icache_all
 #endif
 #else
 	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
@@ -138,7 +145,7 @@ ENTRY(v6_coherent_user_range)
 #ifndef CONFIG_ARM_ERRATA_411920
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
 #else
-	b	v6_icache_inval_all
+	b	v6_flush_icache_all
 #endif
 #else
 	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
@@ -312,6 +319,7 @@ ENDPROC(v6_dma_unmap_area)
 
 	.type	v6_cache_fns, #object
 ENTRY(v6_cache_fns)
+	.long	v6_flush_icache_all
 	.long	v6_flush_kern_cache_all
 	.long	v6_flush_user_cache_all
 	.long	v6_flush_user_cache_range
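Both cache function tables gain a new first entry, which implies a matching new function pointer at the start of struct cpu_cache_fns. That header change (arch/arm/include/asm/cacheflush.h) is outside this diffstat; the following is a sketch of the assumed layout, with only the leading members shown:

/* Sketch only: assumed layout of struct cpu_cache_fns after this series.
 * The .long tables above are indexed by these members in order, so the
 * new flush_icache_all pointer has to come first. */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);	/* new: matches the new first .long */
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
	/* ... remaining members unchanged ... */
};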
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 37c8157e116e..a3ebf7a4f49b 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -18,6 +18,21 @@
 #include "proc-macros.S"
 
 /*
+ * v7_flush_icache_all()
+ *
+ * Flush the whole I-cache.
+ *
+ * Registers:
+ *	r0 - set to 0
+ */
+ENTRY(v7_flush_icache_all)
+	mov	r0, #0
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
+	mov	pc, lr
+ENDPROC(v7_flush_icache_all)
+
+/*
  * v7_flush_dcache_all()
  *
  * Flush the whole D-cache.
@@ -91,11 +106,8 @@ ENTRY(v7_flush_kern_cache_all)
 THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bl	v7_flush_dcache_all
 	mov	r0, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c1, 0		@ invalidate I-cache inner shareable
-#else
-	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
 ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
 THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	mov	pc, lr
@@ -171,11 +183,8 @@ ENTRY(v7_coherent_user_range)
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c7, c1, 6		@ invalidate BTB Inner Shareable
-#else
-	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
 	dsb
 	isb
 	mov	pc, lr
@@ -309,6 +318,7 @@ ENDPROC(v7_dma_unmap_area)
 
 	.type	v7_cache_fns, #object
ENTRY(v7_cache_fns)
+	.long	v7_flush_icache_all
 	.long	v7_flush_kern_cache_all
 	.long	v7_flush_user_cache_all
 	.long	v7_flush_user_cache_range
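The ALT_SMP()/ALT_UP() pairs that replace the old #ifdef CONFIG_SMP blocks throughout this series come from arch/arm/include/asm/assembler.h, which is outside this diffstat. Roughly, on an SMP kernel ALT_SMP() emits the SMP instruction inline while ALT_UP() records a same-size UP replacement in a fixup table; a kernel built for SMP can then patch itself at boot when it finds it is running on a uniprocessor ("SMP on UP"). The following is a sketch of the assumed mechanism, not the exact header:

/* Sketch of the assumed alternatives mechanism (asm/assembler.h): */
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)				\
9998:	instr					/* SMP version runs by default */
#define ALT_UP(instr...)				\
	.pushsection ".alt.smp.init", "a"	;\
	.long	9998b				;\
	instr					;\
	.popsection				/* UP replacement, applied at boot */
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#endif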
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 598c51ad5071..b8061519ce77 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index f55fa1044f72..bdba6c65c901 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	/* FIXME: not highmem safe */
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 9920c0ae2096..649bbcd325bf 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
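All three copy_user_highpage implementations get the same one-line conversion. The flag's sense is inverted but the behaviour is preserved; a zeroed flags field now means "state unknown, must flush", which is the safe default for a freshly allocated page:

/* Illustration of the conversion (both forms flush at most once per page): */
if (test_and_clear_bit(PG_dcache_dirty, &from->flags))	/* old: flush if marked dirty */
	__flush_dcache_page(page_mapping(from), from);

if (!test_and_set_bit(PG_dcache_clean, &from->flags))	/* new: flush unless already clean */
	__flush_dcache_page(page_mapping(from), from);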
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c704eed63c5d..e4dd0646e859 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -229,6 +229,8 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
 			}
 		} while (size -= PAGE_SIZE);
 
+		dsb();
+
 		return (void *)c->vm_start;
 	}
 	return NULL;
@@ -521,6 +523,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		outer_inv_range(paddr, paddr + size);
 
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
 }
 EXPORT_SYMBOL(___dma_page_dev_to_cpu);
 
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906dec1ca1..8440d952ba6d 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -28,6 +28,7 @@
 
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
+#if __LINUX_ARM_ARCH__ < 6
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable.  However, we leave the write buffer on.
@@ -141,7 +142,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
  * a page table, or changing an existing PTE.  Basically, there are two
  * things that we need to take care of:
 *
- *  1. If PG_dcache_dirty is set for the page, we need to ensure
+ *  1. If PG_dcache_clean is not set for the page, we need to ensure
  *     that any cache entries for the kernels virtual memory
  *     range are written back to the page.
  *  2. If we have multiple shared mappings of the same space in
@@ -168,10 +169,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 		return;
 
 	mapping = page_mapping(page);
-#ifndef CONFIG_SMP
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 		__flush_dcache_page(mapping, page);
-#endif
 	if (mapping) {
 		if (cache_is_vivt())
 			make_coherent(mapping, vma, addr, ptep, pfn);
@@ -179,6 +178,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 			__flush_icache_all();
 	}
 }
+#endif	/* __LINUX_ARM_ARCH__ < 6 */
 
 /*
  * Check whether the write buffer has physical address aliasing
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c6844cb9b508..391ffae75098 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
+#include <asm/smp_plat.h>
 
 #include "mm.h"
 
@@ -39,6 +40,18 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	    : "cc");
 }
 
+static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
+{
+	unsigned long colour = CACHE_COLOUR(vaddr);
+	unsigned long offset = vaddr & (PAGE_SIZE - 1);
+	unsigned long to;
+
+	set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0);
+	to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset;
+	flush_tlb_kernel_page(to);
+	flush_icache_range(to, to + len);
+}
+
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
@@ -89,16 +102,16 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
 		__flush_icache_all();
 }
+
 #else
 #define flush_pfn_alias(pfn,vaddr)		do { } while (0)
+#define flush_icache_alias(pfn,vaddr,len)	do { } while (0)
 #endif
 
-#ifdef CONFIG_SMP
 static void flush_ptrace_access_other(void *args)
 {
 	__flush_icache_all();
 }
-#endif
 
 static
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -118,15 +131,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		return;
 	}
 
-	/* VIPT non-aliasing cache */
+	/* VIPT non-aliasing D-cache */
 	if (vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
-		__cpuc_coherent_kern_range(addr, addr + len);
-#ifdef CONFIG_SMP
+		if (icache_is_vipt_aliasing())
+			flush_icache_alias(page_to_pfn(page), uaddr, len);
+		else
+			__cpuc_coherent_kern_range(addr, addr + len);
 		if (cache_ops_need_broadcast())
 			smp_call_function(flush_ptrace_access_other,
 					  NULL, 1);
-#endif
 	}
 }
 
@@ -215,6 +229,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 	flush_dcache_mmap_unlock(mapping);
 }
 
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+	unsigned long pfn;
+	struct page *page;
+	struct address_space *mapping;
+
+	if (!pte_present_user(pteval))
+		return;
+	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+		/* only flush non-aliasing VIPT caches for exec mappings */
+		return;
+	pfn = pte_pfn(pteval);
+	if (!pfn_valid(pfn))
+		return;
+
+	page = pfn_to_page(pfn);
+	if (cache_is_vipt_aliasing())
+		mapping = page_mapping(page);
+	else
+		mapping = NULL;
+
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		__flush_dcache_page(mapping, page);
+	/* pte_exec() already checked above for non-aliasing VIPT cache */
+	if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+		__flush_icache_all();
+}
+#endif
+
 /*
  * Ensure cache coherency between kernel mapping and userspace mapping
  * of this page.
@@ -246,17 +290,16 @@ void flush_dcache_page(struct page *page)
 
 	mapping = page_mapping(page);
 
-#ifndef CONFIG_SMP
-	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
-		set_bit(PG_dcache_dirty, &page->flags);
-	else
-#endif
-	{
+	if (!cache_ops_need_broadcast() &&
+	    mapping && !mapping_mapped(mapping))
+		clear_bit(PG_dcache_clean, &page->flags);
+	else {
 		__flush_dcache_page(mapping, page);
 		if (mapping && cache_is_vivt())
 			__flush_dcache_aliases(mapping, page);
 		else if (mapping)
 			__flush_icache_all();
+		set_bit(PG_dcache_clean, &page->flags);
 	}
 }
 EXPORT_SYMBOL(flush_dcache_page);
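The new __sync_icache_dcache() has no caller within this diffstat; the expected call site is set_pte_at() in arch/arm/include/asm/pgtable.h, so that I/D coherency is resolved when a user PTE is installed rather than later in update_mmu_cache(). A sketch of the assumed caller follows; it is an illustration, not the actual pgtable.h hunk:

/* Sketch, assuming the pgtable.h side of this series: sync caches before
 * a user-space PTE becomes visible. */
#define set_pte_at(mm, addr, ptep, pteval)			\
	do {							\
		if (addr >= TASK_SIZE)				\
			set_pte_ext(ptep, pteval, 0);		\
		else {						\
			__sync_icache_dcache(pteval);		\
			set_pte_ext(ptep, pteval, PTE_EXT_NG);	\
		}						\
	} while (0)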
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7185b00650fe..36c4553ffcce 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -277,7 +277,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 
 	/* Register the kernel text, kernel data and initrd with memblock. */
 #ifdef CONFIG_XIP_KERNEL
-	memblock_reserve(__pa(_data), _end - _data);
+	memblock_reserve(__pa(_sdata), _end - _sdata);
 #else
 	memblock_reserve(__pa(_stext), _end - _stext);
 #endif
@@ -545,7 +545,7 @@ void __init mem_init(void)
 
 			MLK_ROUNDUP(__init_begin, __init_end),
 			MLK_ROUNDUP(_text, _etext),
-			MLK_ROUNDUP(_data, _edata));
+			MLK_ROUNDUP(_sdata, _edata));
 
 #undef MLK
 #undef MLM
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6e1c4f6a2b3f..e2335811c02e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -15,6 +15,7 @@
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
 #include <linux/sort.h>
+#include <linux/fs.h>
 
 #include <asm/cputype.h>
 #include <asm/sections.h>
@@ -246,6 +247,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_USER,
 	},
 	[MT_MEMORY] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -254,6 +258,9 @@ static struct mem_type mem_types[] = {
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_NONCACHED] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+				L_PTE_USER | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
+		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
@@ -303,9 +310,8 @@ static void __init build_mem_type_table(void)
 			cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-#ifdef CONFIG_SMP
-	cachepolicy = CPOLICY_WRITEALLOC;
-#endif
+	if (is_smp())
+		cachepolicy = CPOLICY_WRITEALLOC;
 
 	/*
 	 * Strip out features not present on earlier architectures.
@@ -399,21 +405,22 @@ static void __init build_mem_type_table(void)
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 
-#ifndef CONFIG_SMP
 	/*
 	 * Only use write-through for non-SMP systems
 	 */
-	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
+	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
 		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
-#endif
 
 	/*
 	 * Enable CPU-specific coherency if supported.
 	 * (Only available on XSC3 at the moment.)
 	 */
-	if (arch_is_coherent() && cpu_is_xsc3())
+	if (arch_is_coherent() && cpu_is_xsc3()) {
 		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-
+		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+	}
 	/*
 	 * ARMv6 and above have extended page tables.
 	 */
@@ -426,20 +433,23 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 
-#ifdef CONFIG_SMP
-		/*
-		 * Mark memory with the "shared" attribute for SMP systems
-		 */
-		user_pgprot |= L_PTE_SHARED;
-		kern_pgprot |= L_PTE_SHARED;
-		vecs_pgprot |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
-		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-#endif
+		if (is_smp()) {
+			/*
+			 * Mark memory with the "shared" attribute
+			 * for SMP systems
+			 */
+			user_pgprot |= L_PTE_SHARED;
+			kern_pgprot |= L_PTE_SHARED;
+			vecs_pgprot |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+		}
 	}
 
 	/*
@@ -475,6 +485,8 @@ static void __init build_mem_type_table(void)
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -498,6 +510,19 @@ static void __init build_mem_type_table(void)
 	}
 }
 
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+			      unsigned long size, pgprot_t vma_prot)
+{
+	if (!pfn_valid(pfn))
+		return pgprot_noncached(vma_prot);
+	else if (file->f_flags & O_SYNC)
+		return pgprot_writecombine(vma_prot);
+	return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
+#endif
+
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
 static void __init *early_alloc(unsigned long sz)
@@ -802,8 +827,7 @@ static void __init sanity_check_meminfo(void)
 			 * rather difficult.
 			 */
 			reason = "with VIPT aliasing cache";
-#ifdef CONFIG_SMP
-		} else if (tlb_ops_need_broadcast()) {
+		} else if (is_smp() && tlb_ops_need_broadcast()) {
 			/*
 			 * kmap_high needs to occasionally flush TLB entries,
 			 * however, if the TLB entries need to be broadcast
@@ -813,7 +837,6 @@ static void __init sanity_check_meminfo(void)
 			 * (must not be called with irqs off)
 			 */
 			reason = "without hardware TLB ops broadcasting";
-#endif
 		}
 		if (reason) {
 			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
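The is_smp() helper used in the hunks above lives in arch/arm/include/asm/smp_plat.h and is not part of this diff. The point of converting #ifdef CONFIG_SMP into is_smp() is that the answer can now depend on what the kernel discovers at run time; a sketch of the assumed definition:

/* Sketch of is_smp() as assumed by the hunks above: a compile-time constant
 * for pure UP or pure SMP builds, a runtime test for SMP-on-UP kernels. */
static inline bool is_smp(void)
{
#ifndef CONFIG_SMP
	return false;
#elif defined(CONFIG_SMP_ON_UP)
	extern unsigned int smp_on_up;
	return !!smp_on_up;
#else
	return true;
#endif
}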
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 22aac8515196..b95662dedb64 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -30,13 +30,10 @@
 #define TTB_RGN_WT	(2 << 3)
 #define TTB_RGN_WB	(3 << 3)
 
-#ifndef CONFIG_SMP
-#define TTB_FLAGS	TTB_RGN_WBWA
-#define PMD_FLAGS	PMD_SECT_WB
-#else
-#define TTB_FLAGS	TTB_RGN_WBWA|TTB_S
-#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_UP	TTB_RGN_WBWA
+#define PMD_FLAGS_UP	PMD_SECT_WB
+#define TTB_FLAGS_SMP	TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
 
 ENTRY(cpu_v6_proc_init)
 	mov	pc, lr
@@ -97,7 +94,8 @@ ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_FLAGS
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 	mcr	p15, 0, r2, c7, c10, 4		@ drain write buffer
 	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -156,9 +154,11 @@ cpu_pj4_name:
 */
__v6_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
+	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)	@ Enable SMP/nAMP mode
+	ALT_UP(nop)
 	orr	r0, r0, #0x20
-	mcr	p15, 0, r0, c1, c0, 1
+	ALT_SMP(mcr	p15, 0, r0, c1, c0, 1)
+	ALT_UP(nop)
 #endif
 
 	mov	r0, #0
@@ -169,7 +169,8 @@ __v6_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r0, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r0, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_FLAGS
+	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
@@ -225,10 +226,16 @@ cpu_elf_name:
__v6_proc_info:
 	.long	0x0007b000
 	.long	0x0007f000
-	.long   PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_UP)
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
@@ -249,10 +256,16 @@ __v6_proc_info:
__pj4_v6_proc_info:
 	.long	0x560f5810
 	.long	0xff0ffff0
-	.long   PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_UP)
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 6a8506d99ee9..df422fee1cb6 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -30,15 +30,13 @@
 #define TTB_IRGN_WT	((1 << 0) | (0 << 6))
 #define TTB_IRGN_WB	((1 << 0) | (1 << 6))
 
-#ifndef CONFIG_SMP
 /* PTWs cacheable, inner WB not shareable, outer WB not shareable */
-#define TTB_FLAGS	TTB_IRGN_WB|TTB_RGN_OC_WB
-#define PMD_FLAGS	PMD_SECT_WB
-#else
+#define TTB_FLAGS_UP	TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS_UP	PMD_SECT_WB
+
 /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
-#define TTB_FLAGS	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
-#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
-#endif
+#define TTB_FLAGS_SMP	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS_SMP	PMD_SECT_WBWA|PMD_SECT_S
 
 ENTRY(cpu_v7_proc_init)
 	mov	pc, lr
@@ -105,7 +103,8 @@ ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
-	orr	r0, r0, #TTB_FLAGS
+	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
 #endif
@@ -186,13 +185,15 @@ cpu_v7_name:
 * It is assumed that:
 * - cache type register is implemented
 */
-__v7_setup:
+__v7_ca9mp_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1
+	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
+	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
 	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
 	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
 	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
 #endif
+__v7_setup:
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
 	bl	v7_flush_dcache_all
@@ -201,11 +202,16 @@ __v7_setup:
 	mrc	p15, 0, r0, c0, c0, 0		@ read main ID register
 	and	r10, r0, #0xff000000		@ ARM?
 	teq	r10, #0x41000000
-	bne	2f
+	bne	3f
 	and	r5, r0, #0x00f00000		@ variant
 	and	r6, r0, #0x0000000f		@ revision
-	orr	r0, r6, r5, lsr #20-4		@ combine variant and revision
+	orr	r6, r6, r5, lsr #20-4		@ combine variant and revision
+	ubfx	r0, r0, #4, #12			@ primary part number
 
+	/* Cortex-A8 Errata */
+	ldr	r10, =0x00000c08		@ Cortex-A8 primary part number
+	teq	r0, r10
+	bne	2f
 #ifdef CONFIG_ARM_ERRATA_430973
 	teq	r5, #0x00100000			@ only present in r1p*
 	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
@@ -213,21 +219,42 @@ __v7_setup:
 	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_458693
-	teq	r0, #0x20			@ only present in r2p0
+	teq	r6, #0x20			@ only present in r2p0
 	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
 	orreq	r10, r10, #(1 << 5)		@ set L1NEON to 1
 	orreq	r10, r10, #(1 << 9)		@ set PLDNOP to 1
 	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
 #endif
 #ifdef CONFIG_ARM_ERRATA_460075
-	teq	r0, #0x20			@ only present in r2p0
+	teq	r6, #0x20			@ only present in r2p0
 	mrceq	p15, 1, r10, c9, c0, 2		@ read L2 cache aux ctrl register
 	tsteq	r10, #1 << 22
 	orreq	r10, r10, #(1 << 22)		@ set the Write Allocate disable bit
 	mcreq	p15, 1, r10, c9, c0, 2		@ write the L2 cache aux ctrl register
 #endif
+	b	3f
+
+	/* Cortex-A9 Errata */
+2:	ldr	r10, =0x00000c09		@ Cortex-A9 primary part number
+	teq	r0, r10
+	bne	3f
+#ifdef CONFIG_ARM_ERRATA_742230
+	cmp	r6, #0x22			@ only present up to r2p2
+	mrcle	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orrle	r10, r10, #1 << 4		@ set bit #4
+	mcrle	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
+#ifdef CONFIG_ARM_ERRATA_742231
+	teq	r6, #0x20			@ present in r2p0
+	teqne	r6, #0x21			@ present in r2p1
+	teqne	r6, #0x22			@ present in r2p2
+	mrceq	p15, 0, r10, c15, c0, 1		@ read diagnostic register
+	orreq	r10, r10, #1 << 12		@ set bit #12
+	orreq	r10, r10, #1 << 22		@ set bit #22
+	mcreq	p15, 0, r10, c15, c0, 1		@ write diagnostic register
+#endif
 
-2:	mov	r10, #0
+3:	mov	r10, #0
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
 #endif
@@ -235,7 +262,8 @@ __v7_setup:
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
 	mcr	p15, 0, r10, c2, c0, 2		@ TTB control register
-	orr	r4, r4, #TTB_FLAGS
+	ALT_SMP(orr	r4, r4, #TTB_FLAGS_SMP)
+	ALT_UP(orr	r4, r4, #TTB_FLAGS_UP)
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
@@ -323,6 +351,35 @@ cpu_elf_name:
 
 	.section ".proc.info.init", #alloc, #execinstr
 
+	.type   __v7_ca9mp_proc_info, #object
+__v7_ca9mp_proc_info:
+	.long	0x410fc090		@ Required ID value
+	.long	0xff0ffff0		@ Mask for ID
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_UP)
+	.long   PMD_TYPE_SECT | \
+		PMD_SECT_XN | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__v7_ca9mp_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_v7_name
+	.long	v7_processor_functions
+	.long	v7wbi_tlb_fns
+	.long	v6_user_fns
+	.long	v7_cache_fns
+	.size	__v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+
 /*
  * Match any ARMv7 processor core.
 */
@@ -330,10 +387,16 @@ cpu_elf_name:
__v7_proc_info:
 	.long	0x000f0000		@ Required ID value
 	.long	0x000f0000		@ Mask for ID
-	.long   PMD_TYPE_SECT | \
+	ALT_SMP(.long \
+		PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS_SMP)
+	ALT_UP(.long \
+		PMD_TYPE_SECT | \
 		PMD_SECT_AP_WRITE | \
 		PMD_SECT_AP_READ | \
-		PMD_FLAGS
+		PMD_FLAGS_UP)
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index f3f288a9546d..53cd5b454673 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -13,6 +13,7 @@
 */
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
@@ -41,20 +42,15 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
-#else
-	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
+
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
 	mov	ip, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, ip, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
-#else
-	mcr	p15, 0, ip, c7, c5, 6		@ flush BTAC/BTB
-#endif
+	ALT_SMP(mcr	p15, 0, ip, c7, c1, 6)	@ flush BTAC/BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, ip, c7, c5, 6)	@ flush BTAC/BTB
 	dsb
 	mov	pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
@@ -74,20 +70,14 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	mov	r0, r0, lsl #PAGE_SHIFT
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
-#else
-	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
-#endif
+	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
+	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
 	mov	r2, #0
-#ifdef CONFIG_SMP
-	mcr	p15, 0, r2, c7, c1, 6		@ flush BTAC/BTB Inner Shareable
-#else
-	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
-#endif
+	ALT_SMP(mcr	p15, 0, r2, c7, c1, 6)	@ flush BTAC/BTB Inner Shareable
+	ALT_UP(mcr	p15, 0, r2, c7, c5, 6)	@ flush BTAC/BTB
 	dsb
 	isb
 	mov	pc, lr
@@ -99,5 +89,6 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
ENTRY(v7wbi_tlb_fns)
 	.long	v7wbi_flush_user_tlb_range
 	.long	v7wbi_flush_kern_tlb_range
-	.long	v7wbi_tlb_flags
+	ALT_SMP(.long	v7wbi_tlb_flags_smp)
+	ALT_UP(.long	v7wbi_tlb_flags_up)
 	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns