Diffstat (limited to 'arch/arm/mm')

 arch/arm/mm/Kconfig        |  2
 arch/arm/mm/cache-l2x0.c   | 25
 arch/arm/mm/copypage-v6.c  |  8
 arch/arm/mm/fault-armv.c   |  9
 arch/arm/mm/flush.c        | 49
 arch/arm/mm/mm.h           |  2
 arch/arm/mm/mmu.c          |  2
 7 files changed, 66 insertions(+), 31 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9264d814cd7a..7b7d4c36c11c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -774,5 +774,5 @@ config CACHE_XSC3L2
 
 config ARM_L1_CACHE_SHIFT
 	int
-	default 6 if ARCH_OMAP3
+	default 6 if ARCH_OMAP3 || ARCH_S5PC1XX
 	default 5
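
The hunk above extends the 64-byte cache line selection to ARCH_S5PC1XX: like
OMAP3, the S5PC1xx SoCs are Cortex-A8 based, and the A8 uses 64-byte L1 lines,
so the default shift of 5 (32 bytes) would make cache maintenance loops skip
every other line. A minimal standalone sketch of how the Kconfig value feeds
the line-size macros (the L1_CACHE_SHIFT/L1_CACHE_BYTES derivation mirrors
arch/arm/include/asm/cache.h; the printf harness is illustrative only):

#include <stdio.h>

/* Normally provided by Kconfig; 6 mirrors the ARCH_OMAP3 || ARCH_S5PC1XX
 * case in the hunk above, i.e. 64-byte lines. */
#define CONFIG_ARM_L1_CACHE_SHIFT	6

#define L1_CACHE_SHIFT	CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

int main(void)
{
	/* Cache maintenance walks a page one line at a time, so the
	 * shift must match the real hardware line size. */
	printf("stepping %d bytes per cache line\n", L1_CACHE_BYTES);
	return 0;
}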
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b480f1d3591f..747f9a9021bb 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -99,18 +99,25 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
 	l2x0_base = base;
 
-	/* disable L2X0 */
-	writel(0, l2x0_base + L2X0_CTRL);
+	/*
+	 * Check if l2x0 controller is already enabled.
+	 * If you are booting from non-secure mode
+	 * accessing the below registers will fault.
+	 */
+	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
 
-	aux = readl(l2x0_base + L2X0_AUX_CTRL);
-	aux &= aux_mask;
-	aux |= aux_val;
-	writel(aux, l2x0_base + L2X0_AUX_CTRL);
+		/* l2x0 controller is disabled */
 
-	l2x0_inv_all();
+		aux = readl(l2x0_base + L2X0_AUX_CTRL);
+		aux &= aux_mask;
+		aux |= aux_val;
+		writel(aux, l2x0_base + L2X0_AUX_CTRL);
 
-	/* enable L2X0 */
-	writel(1, l2x0_base + L2X0_CTRL);
+		l2x0_inv_all();
+
+		/* enable L2X0 */
+		writel(1, l2x0_base + L2X0_CTRL);
+	}
 
 	outer_cache.inv_range = l2x0_inv_range;
 	outer_cache.clean_range = l2x0_clean_range;
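
The rewritten l2x0_init() above is a "probe before poking" guard: on parts
where the kernel boots in the non-secure world, the L2X0 control registers
are secure-only, so if firmware has already enabled the controller the kernel
must leave it alone rather than disable and reprogram it. A hedged standalone
model of that pattern, with a fake register file standing in for the MMIO
region (mmio_read()/mmio_write() are illustrative stand-ins for
readl()/writel(); the 0x100/0x104 offsets match L2X0_CTRL/L2X0_AUX_CTRL):

#include <stdint.h>
#include <stdio.h>

#define L2X0_CTRL	0x100
#define L2X0_AUX_CTRL	0x104
#define L2X0_CTRL_EN	0x1

static uint32_t regs[0x200 / 4];	/* fake register file */

static uint32_t mmio_read(uint32_t off) { return regs[off / 4]; }
static void mmio_write(uint32_t off, uint32_t v) { regs[off / 4] = v; }

static void l2x0_init_model(uint32_t aux_val, uint32_t aux_mask)
{
	/* Touch the controller only if it is still disabled; in the real
	 * driver, a non-secure write to an enabled controller would fault. */
	if (!(mmio_read(L2X0_CTRL) & L2X0_CTRL_EN)) {
		uint32_t aux = mmio_read(L2X0_AUX_CTRL);

		aux &= aux_mask;
		aux |= aux_val;
		mmio_write(L2X0_AUX_CTRL, aux);
		mmio_write(L2X0_CTRL, L2X0_CTRL_EN);
	}
}

int main(void)
{
	regs[L2X0_CTRL / 4] = L2X0_CTRL_EN;	/* secure firmware got here first */
	l2x0_init_model(0x0, 0xffffffff);	/* must be a no-op */
	printf("CTRL = %#x\n", mmio_read(L2X0_CTRL));
	return 0;
}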
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 4127a7bddfe5..841f355319bf 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,6 +41,14 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * kmap_atomic() doesn't set the page virtual address, and
+	 * kunmap_atomic() takes care of cache flushing already.
+	 */
+	if (page_address(to) != NULL)
+#endif
+		__cpuc_flush_dcache_page(kto);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
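
Note the shape of the added block: the #ifdef brackets only the if (), so a
kernel built without CONFIG_HIGHMEM flushes unconditionally, while a highmem
build skips the flush for pages with no permanent kernel mapping
(page_address() == NULL), trusting kunmap_atomic() to flush those. A
standalone illustration of that conditional-compilation trick (flush_page()
and have_kernel_mapping are hypothetical stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_HIGHMEM 1	/* comment out to see the other build */

static void flush_page(void) { puts("flushing destination page"); }

static void copy_done(bool have_kernel_mapping)
{
#ifdef CONFIG_HIGHMEM
	/* Highmem build: only flush pages with a permanent mapping. */
	if (have_kernel_mapping)
#endif
		flush_page();
}

int main(void)
{
	copy_done(true);	/* lowmem page: flushed here */
	copy_done(false);	/* highmem page: kunmap_atomic()'s job */
	return 0;
}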
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index d0d17b6a3703..729602291958 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -23,6 +23,8 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#include "mm.h"
+
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
@@ -151,7 +153,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 	if (!pfn_valid(pfn))
 		return;
 
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
 	page = pfn_to_page(pfn);
+	if (page == ZERO_PAGE(0))
+		return;
+
 	mapping = page_mapping(page);
 #ifndef CONFIG_SMP
 	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
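
Both this hunk and the flush_dcache_page() hunk below rely on the same
invariant: the empty zero page only ever backs read-only mappings, so its
cache lines can never be dirty, and flushing it on every fault of an
untouched anonymous mapping is wasted work. A toy model of the short-circuit
(struct page and ZERO_PAGE() are simplified stand-ins for the kernel's):

#include <stdio.h>

struct page { const char *name; };

static struct page zero_page = { "zero page" };

/* Stand-in for the kernel's ZERO_PAGE(vaddr) lookup. */
#define ZERO_PAGE(vaddr) (&zero_page)

static void update_mmu_cache_model(struct page *page)
{
	/* Never written to, so never dirty, so never flushed. */
	if (page == ZERO_PAGE(0))
		return;

	printf("flushing %s\n", page->name);
}

int main(void)
{
	struct page file_page = { "file page" };

	update_mmu_cache_model(&zero_page);	/* early return */
	update_mmu_cache_model(&file_page);	/* flushed */
	return 0;
}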
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 7f294f307c83..329594e760cd 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -35,14 +35,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	    :
 	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
 	    : "cc");
-	__flush_icache_all();
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-			__cpuc_flush_user_all();
+		vivt_flush_cache_mm(mm);
 		return;
 	}
 
@@ -52,16 +50,13 @@ void flush_cache_mm(struct mm_struct *mm)
 	    :
 	    : "r" (0)
 	    : "cc");
-		__flush_icache_all();
 	}
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-						vma->vm_flags);
+		vivt_flush_cache_range(vma, start, end);
 		return;
 	}
 
@@ -71,22 +66,26 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 	    :
 	    : "r" (0)
 	    : "cc");
-		__flush_icache_all();
 	}
+
+	if (vma->vm_flags & VM_EXEC)
+		__flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = user_addr & PAGE_MASK;
-			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-		}
+		vivt_flush_cache_page(vma, user_addr, pfn);
 		return;
 	}
 
-	if (cache_is_vipt_aliasing())
+	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(pfn, user_addr);
+		__flush_icache_all();
+	}
+
+	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+		__flush_icache_all();
 }
 
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -94,15 +93,13 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 		       unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = (unsigned long)kaddr;
-			__cpuc_coherent_kern_range(addr, addr + len);
-		}
+		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(page_to_pfn(page), uaddr);
+		__flush_icache_all();
 		return;
 	}
 
@@ -120,6 +117,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
+	void *addr = page_address(page);
+
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page. This ensures that data in the physical page is mutually
@@ -130,9 +129,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * kmap_atomic() doesn't set the page virtual address, and
 	 * kunmap_atomic() takes care of cache flushing already.
 	 */
-	if (page_address(page))
+	if (addr)
 #endif
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_page(addr);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -196,7 +195,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
  */
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping;
+
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
+	if (page == ZERO_PAGE(0))
+		return;
+
+	mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
 	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
@@ -242,6 +250,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 		 * userspace address only.
 		 */
 		flush_pfn_alias(pfn, vmaddr);
+		__flush_icache_all();
 	}
 
 	/*
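
The common thread in the flush.c hunks: flush_pfn_alias() no longer drags a
full I-cache invalidate along with every D-cache alias flush. Instead, each
caller invalidates the I-cache itself, and only when it can matter, i.e. for
executable mappings (VM_EXEC) or the aliasing cases. A condensed model of
that policy (flush_dcache_alias()/flush_icache_all() are illustrative
stand-ins; the VM_EXEC value matches the kernel's vm_flags bit):

#include <stdio.h>

#define VM_EXEC 0x00000004u

static void flush_dcache_alias(void) { puts("D-cache alias flushed"); }
static void flush_icache_all(void)   { puts("I-cache invalidated"); }

static void flush_cache_range_model(unsigned int vm_flags)
{
	flush_dcache_alias();

	/* Only executable mappings can leave stale I-cache lines behind. */
	if (vm_flags & VM_EXEC)
		flush_icache_all();
}

int main(void)
{
	flush_cache_range_model(VM_EXEC);	/* text mapping: both caches */
	flush_cache_range_model(0);		/* data mapping: D-cache only */
	return 0;
}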
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index c4f6f05198e0..a888363398f8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -24,6 +24,8 @@ struct mem_type {
 
 const struct mem_type *get_mem_type(unsigned int type);
 
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 #endif
 
 struct map_desc;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ea67be0223ac..2427cdcd9098 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1036,7 +1036,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	 */
 	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
-	flush_dcache_page(empty_zero_page);
+	__flush_dcache_page(NULL, empty_zero_page);
 }
 
 /*
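
The one-line mmu.c change closes the loop with the flush.c hunk: now that
flush_dcache_page() returns early for the zero page, paging_init() must reach
the low-level flush directly, since the page has just been allocated and
zeroed and so carries dirty cache lines exactly once, at boot. A sketch of
the call relationship (simplified signatures, not the kernel's):

#include <stddef.h>
#include <stdio.h>

struct page { int is_zero_page; };

static void __flush_dcache_page_model(void *mapping, struct page *page)
{
	(void)mapping;
	printf("dcache flushed for page %p\n", (void *)page);
}

static void flush_dcache_page_model(struct page *page)
{
	if (page->is_zero_page)		/* the new short-circuit */
		return;
	__flush_dcache_page_model(NULL, page);
}

int main(void)
{
	struct page zero = { .is_zero_page = 1 };

	flush_dcache_page_model(&zero);		/* no-op by design */
	__flush_dcache_page_model(NULL, &zero);	/* the one boot-time flush */
	return 0;
}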