Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/copypage-v6.c |  9 +--------
 arch/arm/mm/dma-mapping.c |  5 +++++
 arch/arm/mm/flush.c       | 25 +++++++++++++++----------
 arch/arm/mm/highmem.c     | 87 ++++++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 107 insertions(+), 19 deletions(-)
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6df..f55fa1044f7 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (page_address(to) != NULL)
-#endif
-		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
+	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
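
Note: with kunmap_atomic() now flushing only VIVT caches (see the highmem.c hunk below), the copy path has to flush the destination itself in every configuration, which is why the #ifdef CONFIG_HIGHMEM dance goes away. For reference, a sketch of the resulting function; the enclosing declaration and locals are reconstructed from this hunk's context, not copied verbatim from the file:

static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
	/* kunmap_atomic() now flushes only on VIVT, so flush explicitly */
	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}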
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc0b26..13fa536d82e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			vaddr += offset;
 			op(vaddr, len, dir);
 			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			vaddr = kmap_high_l1_vipt(page, &saved_pte);
+			op(vaddr + offset, len, dir);
+			kunmap_high_l1_vipt(page, saved_pte);
 		}
 	} else {
 		vaddr = page_address(page) + offset;
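
The new branch handles a highmem page that currently has no kernel mapping: kmap_high_get() (just above the visible context) returned NULL, yet a VIPT cache may still hold lines for the page, so a temporary mapping is set up just for the maintenance op. A sketch of how the branch sits in dma_cache_maint_page(), reconstructed from the context lines; the PageHighMem() test and the kmap_high_get() call are assumptions:

	if (PageHighMem(page)) {
		vaddr = kmap_high_get(page);	/* assumed from context */
		if (vaddr) {
			vaddr += offset;
			op(vaddr, len, dir);	/* maintain via the pinned kmap */
			kunmap_high(page);
		} else if (cache_is_vipt()) {	/* new in this patch */
			pte_t saved_pte;
			vaddr = kmap_high_l1_vipt(page, &saved_pte);
			op(vaddr + offset, len, dir);
			kunmap_high_l1_vipt(page, saved_pte);
		}
		/* VIVT with no mapping: no live cache lines, nothing to do */
	} else {
		vaddr = page_address(page) + offset;	/* lowmem is always mapped */
		op(vaddr, len, dir);
	}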
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e209..c6844cb9b50 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	void *addr = page_address(page);
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page. This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (addr)
-#endif
-		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+	if (!PageHighMem(page)) {
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+	} else {
+		void *addr = kmap_high_get(page);
+		if (addr) {
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			addr = kmap_high_l1_vipt(page, &saved_pte);
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high_l1_vipt(page, saved_pte);
+		}
+	}
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
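
Both callers above lean on the kmap_high_get() contract: unlike kmap_high() it never sleeps, and when the page is currently kmapped it returns the existing virtual address with the mapping pinned, NULL otherwise. A hedged sketch of the calling pattern (the surrounding code is ours, for illustration):

	void *addr = kmap_high_get(page);	/* atomic; NULL if not kmapped */
	if (addr) {
		/* the mapping is pinned, so it cannot vanish under us */
		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
		kunmap_high(page);		/* drop the reference taken above */
	} else {
		/* no kernel mapping: nothing cached on VIVT; on VIPT, fall
		 * back to a temporary kmap_high_l1_vipt() mapping */
	}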
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b4..77b030f5ec0 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
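
This VIVT-only test is what the rest of the patch compensates for: VIVT cache lines are looked up by virtual address, so a dirty line would become unreachable once this fixmap slot is recycled and must be flushed before unmap; VIPT lines are physically tagged, survive the unmap harmlessly, and can be cleaned later through any mapping (which is what kmap_high_l1_vipt() below provides). An annotated restatement of the new conditional (comments are ours, not from the source):

	if (kvaddr >= (void *)FIXADDR_START) {
		/* VIVT: flush now, while the virtual mapping still exists */
		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
		/* VIPT: skip the flush on this hot unmap path; the lines
		 * stay valid and maintainable after the mapping goes away */
		...
	}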
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupt disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allow it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
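
The depth counter and saved_pte turn the single per-CPU fixmap slot into a small save/restore stack, which is what makes the pair safe against interruption by another maintenance operation. A worked trace of the reentrant case (annotation only; pageA and pageB are hypothetical pages):

	pte_t pteA, pteB;

	kmap_high_l1_vipt(pageA, &pteA);	/* depth 0->1; slot now maps A */
	/* ... interrupted while flushing A ... */
		kmap_high_l1_vipt(pageB, &pteB);	/* depth 1->2; pteB saves
							   A's pte; slot -> B */
		/* ... flush B in interrupt context ... */
		kunmap_high_l1_vipt(pageB, pteB);	/* depth 2->1; A's pte is
							   restored and the TLB
							   flushed, so the task
							   resumes safely */
	/* ... task finishes flushing A ... */
	kunmap_high_l1_vipt(pageA, pteA);	/* depth 1->0; A's pte is left
						   in place, so an immediate
						   re-map of A needs no TLB
						   flush (common with DMA) */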