aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/mm/cache-sh4.c
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2009-09-09 03:06:39 -0400
committerPaul Mundt <lethal@linux-sh.org>2009-09-09 03:06:39 -0400
commitdeaef20e9789d93c06d2d3b5ffc99939814802ca (patch)
tree1f35d8baa4938c772201f983aed7761a88ce51d0 /arch/sh/mm/cache-sh4.c
parentbd6df57481b329dfeeb4889068848ee4f4761561 (diff)
sh: Rework sh4_flush_cache_page() for coherent kmap mapping.
This builds on top of the MIPS r4k code that does roughly the same thing.
This permits the use of kmap_coherent() for mapped pages with dirty dcache
lines and falls back on kmap_atomic() otherwise.

This also fixes up a problem with the alias check and defers to
shm_align_mask directly.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
-rw-r--r--  arch/sh/mm/cache-sh4.c  75
1 file changed, 48 insertions, 27 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 92b7d947db94..e3fbd99b323c 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -2,7 +2,7 @@
  * arch/sh/mm/cache-sh4.c
  *
  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2001 - 2007  Paul Mundt
+ * Copyright (C) 2001 - 2009  Paul Mundt
  * Copyright (C) 2003  Richard Curnow
  * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
  *
@@ -15,6 +15,8 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/fs.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -23,7 +25,6 @@
  * flushing. Anything exceeding this will simply flush the dcache in its
  * entirety.
  */
-#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
@@ -209,44 +210,64 @@ static void sh4_flush_cache_page(void *args)
 {
 	struct flusher_data *data = args;
 	struct vm_area_struct *vma;
+	struct page *page;
 	unsigned long address, pfn, phys;
-	unsigned int alias_mask;
+	int map_coherent = 0;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	void *vaddr;
 
 	vma = data->vma;
 	address = data->addr1;
 	pfn = data->addr2;
 	phys = pfn << PAGE_SHIFT;
+	page = pfn_to_page(pfn);
 
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
 
-	alias_mask = boot_cpu_data.dcache.alias_mask;
-
-	/* We only need to flush D-cache when we have alias */
-	if ((address^phys) & alias_mask) {
-		/* Loop 4K of the D-cache */
-		flush_cache_4096(
-			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
-			phys);
-		/* Loop another 4K of the D-cache */
-		flush_cache_4096(
-			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
-			phys);
-	}
+	address &= PAGE_MASK;
+	pgd = pgd_offset(vma->vm_mm, address);
+	pud = pud_offset(pgd, address);
+	pmd = pmd_offset(pud, address);
+	pte = pte_offset_kernel(pmd, address);
+
+	/* If the page isn't present, there is nothing to do here. */
+	if (!(pte_val(*pte) & _PAGE_PRESENT))
+		return;
 
-	alias_mask = boot_cpu_data.icache.alias_mask;
-	if (vma->vm_flags & VM_EXEC) {
+	if ((vma->vm_mm == current->active_mm))
+		vaddr = NULL;
+	else {
 		/*
-		 * Evict entries from the portion of the cache from which code
-		 * may have been executed at this address (virtual).  There's
-		 * no need to evict from the portion corresponding to the
-		 * physical address as for the D-cache, because we know the
-		 * kernel has never executed the code through its identity
-		 * translation.
+		 * Use kmap_coherent or kmap_atomic to do flushes for
+		 * another ASID than the current one.
 		 */
-		flush_cache_4096(
-			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
-			phys);
+		map_coherent = (current_cpu_data.dcache.n_aliases &&
+			!test_bit(PG_dcache_dirty, &page->flags) &&
+			page_mapped(page));
+		if (map_coherent)
+			vaddr = kmap_coherent(page, address);
+		else
+			vaddr = kmap_atomic(page, KM_USER0);
+
+		address = (unsigned long)vaddr;
+	}
+
+	if (pages_do_alias(address, phys))
+		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY |
+			(address & shm_align_mask), phys);
+
+	if (vma->vm_flags & VM_EXEC)
+		flush_icache_all();
+
+	if (vaddr) {
+		if (map_coherent)
+			kunmap_coherent(vaddr);
+		else
+			kunmap_atomic(vaddr, KM_USER0);
 	}
 }
 