path: root/arch/sh/mm/cache.c
author	Paul Mundt <lethal@linux-sh.org>	2009-09-03 04:21:10 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-09-03 04:21:10 -0400
commit	0906a3ad33a254094fb74828e3ddb9af8771a6da (patch)
tree	33acc1be2e213ae2f13439d3d5f8e9dd8a4f2d46 /arch/sh/mm/cache.c
parent	d1af119a69fc9a625bd57a66d9c9fa88795b082c (diff)
sh: Fix up and optimize the kmap_coherent() interface.
This fixes up the kmap_coherent/kunmap_coherent() interface for recent changes both in the page fault path and the shared cache flushers, as well as adding in some optimizations.

One of the key things to note here is that the TLB flush itself is deferred until the unmap, and the call in to update_mmu_cache() itself goes away, relying on the regular page fault path to handle the lazy dcache writeback if necessary.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
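In caller terms, the new convention is simply to hand the mapped address back at unmap time. A minimal sketch of the pattern, modeled on the copy_to_user_page() hunk below (page, vaddr, src and len are that function's arguments):

	/* Map the user page to a cache-coherent kernel alias. */
	void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);

	memcpy(vto, src, len);

	/*
	 * The coherent mapping's TLB entry is now torn down here, at
	 * unmap time, using the address handed back by kmap_coherent().
	 */
	kunmap_coherent(vto);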
Diffstat (limited to 'arch/sh/mm/cache.c')
-rw-r--r--	arch/sh/mm/cache.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index db2b1c5beffd..8e4a8d1ac4a9 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -51,7 +51,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	    !test_bit(PG_dcache_dirty, &page->flags)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
-		kunmap_coherent();
+		kunmap_coherent(vto);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
@@ -70,7 +70,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 	    !test_bit(PG_dcache_dirty, &page->flags)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
-		kunmap_coherent();
+		kunmap_coherent(vfrom);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
@@ -89,7 +89,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	    !test_bit(PG_dcache_dirty, &from->flags)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
-		kunmap_coherent();
+		kunmap_coherent(vfrom);
 	} else {
 		vfrom = kmap_atomic(from, KM_USER0);
 		copy_page(vto, vfrom);
@@ -150,7 +150,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 			kaddr = kmap_coherent(page, vmaddr);
 			__flush_wback_region((void *)kaddr, PAGE_SIZE);
-			kunmap_coherent();
+			kunmap_coherent(kaddr);
 		} else
 			__flush_wback_region((void *)addr, PAGE_SIZE);
 	}
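For illustration only, a rough sketch of what a deferred-flush unmap path can look like on sh. This is not taken from arch/sh/mm/kmap.c; the P3SEG range check, the single-page TLB invalidation and the pagefault_disable()/pagefault_enable() pairing are assumptions made for the example.

	#include <linux/uaccess.h>	/* pagefault_enable() */
	#include <asm/addrspace.h>	/* P3SEG */
	#include <asm/page.h>		/* PAGE_MASK */
	#include <asm/mmu_context.h>	/* get_asid() */
	#include <asm/tlbflush.h>	/* local_flush_tlb_one() */

	/* Hypothetical sketch of a deferred-flush kunmap_coherent(). */
	void kunmap_coherent(void *kvaddr)
	{
		/* Assume only kmap_coherent() mappings live above P3SEG. */
		if (kvaddr >= (void *)P3SEG) {
			unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;

			/*
			 * Deferred flush: the TLB entry for the coherent
			 * alias is invalidated here, once the caller is done
			 * with it, rather than when the mapping was set up.
			 */
			local_flush_tlb_one(get_asid(), vaddr);
		}

		/* Assumed to pair with pagefault_disable() in kmap_coherent(). */
		pagefault_enable();
	}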