-rw-r--r--  arch/arm/include/asm/highmem.h    | 15
-rw-r--r--  arch/arm/include/asm/kmap_types.h |  1
-rw-r--r--  arch/arm/mm/copypage-v6.c         |  9
-rw-r--r--  arch/arm/mm/dma-mapping.c         |  5
-rw-r--r--  arch/arm/mm/flush.c               | 25
-rw-r--r--  arch/arm/mm/highmem.c             | 87
6 files changed, 122 insertions, 20 deletions
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 7f36d00600b4..feb988a7ec37 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -11,7 +11,11 @@
 
 #define kmap_prot		PAGE_KERNEL
 
-#define flush_cache_kmaps()	flush_cache_all()
+#define flush_cache_kmaps() \
+	do { \
+		if (cache_is_vivt()) \
+			flush_cache_all(); \
+	} while (0)
 
 extern pte_t *pkmap_page_table;
 
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
+extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
+extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
+
+/*
+ * The following functions are already defined by <linux/highmem.h>
+ * when CONFIG_HIGHMEM is not set.
+ */
+#ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
 extern void *kmap_atomic(struct page *page, enum km_type type);
 extern void kunmap_atomic(void *kvaddr, enum km_type type);
 extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 extern struct page *kmap_atomic_to_page(const void *ptr);
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189..c4b2ea3fbe42 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L1_CACHE,
 	KM_L2_CACHE,
 	KM_TYPE_NR
 };
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (page_address(to) != NULL)
-#endif
-		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
+	__cpuc_flush_dcache_area(kto, PAGE_SIZE);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc0b26f..13fa536d82e6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 				vaddr += offset;
 				op(vaddr, len, dir);
 				kunmap_high(page);
+			} else if (cache_is_vipt()) {
+				pte_t saved_pte;
+				vaddr = kmap_high_l1_vipt(page, &saved_pte);
+				op(vaddr + offset, len, dir);
+				kunmap_high_l1_vipt(page, saved_pte);
 			}
 		} else {
 			vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-	void *addr = page_address(page);
-
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-#ifdef CONFIG_HIGHMEM
-	/*
-	 * kmap_atomic() doesn't set the page virtual address, and
-	 * kunmap_atomic() takes care of cache flushing already.
-	 */
-	if (addr)
-#endif
-		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+	if (!PageHighMem(page)) {
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+	} else {
+		void *addr = kmap_high_get(page);
+		if (addr) {
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high(page);
+		} else if (cache_is_vipt()) {
+			pte_t saved_pte;
+			addr = kmap_high_l1_vipt(page, &saved_pte);
+			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+			kunmap_high_l1_vipt(page, saved_pte);
+		}
+	}
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+		if (cache_is_vivt())
+			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include <linux/percpu.h>
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	if (!in_interrupt())
+		preempt_disable();
+
+	raw_local_irq_save(flags);
+	(*depth)++;
+	if (pte_val(*ptep) == pte_val(pte)) {
+		*saved_pte = pte;
+	} else {
+		*saved_pte = *ptep;
+		set_pte_ext(ptep, pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+	unsigned int idx, cpu = smp_processor_id();
+	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned long vaddr, flags;
+	pte_t pte, *ptep;
+
+	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	ptep = TOP_PTE(vaddr);
+	pte = mk_pte(page, kmap_prot);
+
+	BUG_ON(pte_val(*ptep) != pte_val(pte));
+	BUG_ON(*depth <= 0);
+
+	raw_local_irq_save(flags);
+	(*depth)--;
+	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+		set_pte_ext(ptep, saved_pte, 0);
+		local_flush_tlb_kernel_page(vaddr);
+	}
+	raw_local_irq_restore(flags);
+
+	if (!in_interrupt())
+		preempt_enable();
+}
+
+#endif	/* CONFIG_CPU_CACHE_VIPT */
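
Usage sketch (illustrative only, not part of the patch): the caller pattern the new helpers expect, mirroring the flush.c and dma-mapping.c hunks above. The helper name my_flush_page() is hypothetical; everything else comes from the patch or existing ARM kernel code.

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

static void my_flush_page(struct page *page)
{
	if (!PageHighMem(page)) {
		/* lowmem pages always have a permanent kernel mapping */
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else if (cache_is_vipt()) {
		/*
		 * Unmapped highmem page on a VIPT cache: borrow the per-CPU
		 * fixmap slot set up by kmap_high_l1_vipt().  (The real code
		 * first tries kmap_high_get() to reuse an existing mapping.)
		 */
		pte_t saved_pte;
		void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

		__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
		kunmap_high_l1_vipt(page, saved_pte);
	}
}

Because the helpers save and restore the previous fixmap PTE and track a per-CPU depth counter, this pattern is safe even when the same code path is re-entered from interrupt context.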