author	Paul Mundt <lethal@linux-sh.org>	2010-12-01 01:39:51 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-12-01 01:39:51 -0500
commit	55661fc1f105ed75852e937bf8ea408270eb0cca (patch)
tree	aa96c0c6dd0a8230f9373cab32cd069564d27d23 /arch/sh/mm/cache.c
parent	22a5b566c8c442b0b35b3b106795e2f2b3578096 (diff)
sh: Assume new page cache pages have dirty dcache lines.
This follows the ARM change c01778001a4f5ad9c62d882776235f3f31922fdd
("ARM: 6379/1: Assume new page cache pages have dirty D-cache") for the
same rationale:

There are places in Linux where writes to newly allocated page cache
pages happen without a subsequent call to flush_dcache_page() (several
PIO drivers, including USB HCD). This patch changes the meaning of
PG_arch_1 to be PG_dcache_clean and always flushes the D-cache for a
newly mapped page in update_mmu_cache().

This addresses issues seen with executing binaries from MMC, in
addition to some of the other HCDs that don't explicitly do cache
management for their pipe-in buffers.

Requested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
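Editor's note, not part of the patch: the semantic inversion is easiest
to see as a small C sketch. PG_dcache_clean reuses the same PG_arch_1
bit that PG_dcache_dirty did; a newly allocated page cache page starts
with its arch flag clear, so the cleared bit used to mean "clean" and
now means "possibly dirty". The helper name sketch_update_cache() is
illustrative only, modeled on the __update_cache() hunk in this diff:

	#define PG_dcache_clean	PG_arch_1	/* same bit, inverted meaning */

	/* Illustrative only: mirrors the __update_cache() change below. */
	static void sketch_update_cache(struct page *page)
	{
		/*
		 * test_and_set_bit() returns the old bit value: 0 for a
		 * page never marked clean, so new pages are treated as
		 * dirty and their aliased D-cache lines are purged.
		 */
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);

		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}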
Diffstat (limited to 'arch/sh/mm/cache.c')
-rw-r--r--	arch/sh/mm/cache.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index ba401d137bb9..88d3dc3d30d5 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -60,14 +60,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long len)
 {
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
+	    test_bit(PG_dcache_clean, &page->flags)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent(vto);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
+			clear_bit(PG_dcache_clean, &page->flags);
 	}
 
 	if (vma->vm_flags & VM_EXEC)
@@ -79,14 +79,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len)
 {
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
+	    test_bit(PG_dcache_clean, &page->flags)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent(vfrom);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
+			clear_bit(PG_dcache_clean, &page->flags);
 	}
 }
 
@@ -98,7 +98,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	vto = kmap_atomic(to, KM_USER1);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
-	    !test_bit(PG_dcache_dirty, &from->flags)) {
+	    test_bit(PG_dcache_clean, &from->flags)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent(vfrom);
@@ -141,7 +141,7 @@ void __update_cache(struct vm_area_struct *vma,
 
 	page = pfn_to_page(pfn);
 	if (pfn_valid(pfn)) {
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
 		if (dirty)
 			__flush_purge_region(page_address(page), PAGE_SIZE);
 	}
@@ -153,7 +153,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 	if (pages_do_alias(addr, vmaddr)) {
 		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-		    !test_bit(PG_dcache_dirty, &page->flags)) {
+		    test_bit(PG_dcache_clean, &page->flags)) {
 			void *kaddr;
 
 			kaddr = kmap_coherent(page, vmaddr);