author    Paul Mundt <lethal@linux-sh.org>  2009-07-22 06:20:49 -0400
committer Paul Mundt <lethal@linux-sh.org>  2009-07-22 06:20:49 -0400
commit    2277ab4a1df50e05bc732fe9488d4e902bb8399a (patch)
tree      f41cb47f15e02bbd1f79bf08ef7762d3bba934f6 /arch/sh/mm/pg-sh4.c
parent    c0b96cf639aa1bfa8983f734d4225091aa813e00 (diff)
sh: Migrate from PG_mapped to PG_dcache_dirty.
This inverts the delayed dcache flush a bit to be more in line with other
platforms. At the same time this also gives us the ability to do some more
optimizations and cleanup. Now that the update_mmu_cache() callsite only
tests for the bit, the implementation can gradually be split out and made
generic, rather than relying on special implementations for each of the
peculiar CPU types.

SH7705 in 32kB mode and SH-4 still need slightly different handling, but
this is something that can remain isolated in the varying page copy/clear
routines. On top of that, SH-X3 is dcache coherent, so there is no need to
bother with any of these tests in the PTEAEX version of update_mmu_cache(),
so we kill that off too.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
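As a rough sketch of the consumer side this message describes (illustrative
only, not a hunk from this patch; the helper name __update_cache_sketch is
hypothetical, while PG_dcache_dirty, pages_do_alias() and
__flush_wback_region() are the names the patch relies on), the generic
update_mmu_cache() path only needs to test-and-clear the dirty bit and write
back the kernel alias when it actually conflicts:

/*
 * Sketch of the deferred dcache flush at the update_mmu_cache() callsite.
 * Illustrative only; the real implementation lives in the SH TLB code.
 */
static void __update_cache_sketch(unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;
	void *kaddr;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (!test_and_clear_bit(PG_dcache_dirty, &page->flags))
		return;

	/* Only write back if the kernel mapping aliases the user one. */
	kaddr = page_address(page);
	if (kaddr && pages_do_alias((unsigned long)kaddr, address & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);
}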
Diffstat (limited to 'arch/sh/mm/pg-sh4.c')
-rw-r--r--  arch/sh/mm/pg-sh4.c |  74
1 file changed, 29 insertions(+), 45 deletions(-)
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 2fe14da1f839..f3c4b2a54fc7 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -15,8 +15,6 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
-
 #define kmap_get_fixmap_pte(vaddr)                                     \
 	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
 
@@ -68,10 +66,9 @@ static inline void kunmap_coherent(struct page *page)
  */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
-
 	clear_page(to);
-	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
+
+	if (pages_do_alias((unsigned long)to, address & PAGE_MASK))
 		__flush_wback_region(to, PAGE_SIZE);
 }
 
@@ -79,13 +76,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-	void *vto;
-
-	__set_bit(PG_mapped, &page->flags);
-
-	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-	memcpy(vto, src, len);
-	kunmap_coherent(vto);
+	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(vto, src, len);
+		kunmap_coherent(vto);
+	} else {
+		memcpy(dst, src, len);
+		set_bit(PG_dcache_dirty, &page->flags);
+	}
 
 	if (vma->vm_flags & VM_EXEC)
 		flush_cache_page(vma, vaddr, page_to_pfn(page));
@@ -95,13 +93,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 			 unsigned long vaddr, void *dst, const void *src,
 			 unsigned long len)
 {
-	void *vfrom;
-
-	__set_bit(PG_mapped, &page->flags);
-
-	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-	memcpy(dst, vfrom, len);
-	kunmap_coherent(vfrom);
+	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(dst, vfrom, len);
+		kunmap_coherent(vfrom);
+	} else {
+		memcpy(dst, src, len);
+		set_bit(PG_dcache_dirty, &page->flags);
+	}
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
@@ -109,14 +108,19 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
 	void *vfrom, *vto;
 
-	__set_bit(PG_mapped, &to->flags);
-
 	vto = kmap_atomic(to, KM_USER1);
-	vfrom = kmap_coherent(from, vaddr);
-	copy_page(vto, vfrom);
-	kunmap_coherent(vfrom);
 
-	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
+	if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
+		vfrom = kmap_coherent(from, vaddr);
+		copy_page(vto, vfrom);
+		kunmap_coherent(vfrom);
+	} else {
+		vfrom = kmap_atomic(from, KM_USER0);
+		copy_page(vto, vfrom);
+		kunmap_atomic(vfrom, KM_USER0);
+	}
+
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
 		__flush_wback_region(vto, PAGE_SIZE);
 
 	kunmap_atomic(vto, KM_USER1);
@@ -124,23 +128,3 @@ void copy_user_highpage(struct page *to, struct page *from,
 	smp_wmb();
 }
 EXPORT_SYMBOL(copy_user_highpage);
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-	return pte;
-}
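The diff replaces the file-local CACHE_ALIAS xor test with pages_do_alias(),
which comes from the SH cache headers and is not shown here. As a minimal
sketch of the assumed semantics (the function name pages_do_alias_sketch is
hypothetical; the alias mask is the same one the deleted CACHE_ALIAS macro
referenced), two addresses alias in the virtually-indexed dcache when they
differ within the cache's alias mask:

/*
 * Assumed semantics of the helper used above. Illustrative only; the
 * real definition lives in the SH cache headers.
 */
static inline int pages_do_alias_sketch(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & current_cpu_data.dcache.alias_mask;
}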