about summary refs log tree commit diff stats
path: root/arch/sh/mm/pg-sh7705.c
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2009-07-22 06:20:49 -0400
committerPaul Mundt <lethal@linux-sh.org>2009-07-22 06:20:49 -0400
commit2277ab4a1df50e05bc732fe9488d4e902bb8399a (patch)
treef41cb47f15e02bbd1f79bf08ef7762d3bba934f6 /arch/sh/mm/pg-sh7705.c
parentc0b96cf639aa1bfa8983f734d4225091aa813e00 (diff)
sh: Migrate from PG_mapped to PG_dcache_dirty.
This inverts the delayed dcache flush a bit to be more in line with other platforms. At the same time this also gives us the ability to do some more optimizations and cleanup. Now that the update_mmu_cache() callsite only tests for the bit, the implementation can gradually be split out and made generic, rather than relying on special implementations for each of the peculiar CPU types. SH7705 in 32kB mode and SH-4 still need slightly different handling, but this is something that can remain isolated in the varying page copy/clear routines. On top of that, SH-X3 is dcache coherent, so there is no need to bother with any of these tests in the PTEAEX version of update_mmu_cache(), so we kill that off too. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/pg-sh7705.c')
-rw-r--r--arch/sh/mm/pg-sh7705.c52
1 file changed, 8 insertions(+), 44 deletions(-)
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index eaf25147194c..684891b5c8c0 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -26,7 +26,7 @@
26#include <asm/mmu_context.h> 26#include <asm/mmu_context.h>
27#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28 28
29static inline void __flush_purge_virtual_region(void *p1, void *virt, int size) 29static void __flush_purge_virtual_region(void *p1, void *virt, int size)
30{ 30{
31 unsigned long v; 31 unsigned long v;
32 unsigned long begin, end; 32 unsigned long begin, end;
@@ -75,19 +75,13 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
75 */ 75 */
76void clear_user_page(void *to, unsigned long address, struct page *pg) 76void clear_user_page(void *to, unsigned long address, struct page *pg)
77{ 77{
78 struct page *page = virt_to_page(to); 78 if (pages_do_alias(address, (unsigned long)to))
79
80 __set_bit(PG_mapped, &page->flags);
81 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
82 clear_page(to);
83 __flush_wback_region(to, PAGE_SIZE);
84 } else {
85 __flush_purge_virtual_region(to, 79 __flush_purge_virtual_region(to,
86 (void *)(address & 0xfffff000), 80 (void *)(address & 0xfffff000),
87 PAGE_SIZE); 81 PAGE_SIZE);
88 clear_page(to); 82
89 __flush_wback_region(to, PAGE_SIZE); 83 clear_page(to);
90 } 84 __flush_wback_region(to, PAGE_SIZE);
91} 85}
92 86
93/* 87/*
@@ -98,41 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
98 */ 92 */
99void copy_user_page(void *to, void *from, unsigned long address, struct page *pg) 93void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
100{ 94{
101 struct page *page = virt_to_page(to); 95 if (pages_do_alias(address, (unsigned long)to))
102
103
104 __set_bit(PG_mapped, &page->flags);
105 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
106 copy_page(to, from);
107 __flush_wback_region(to, PAGE_SIZE);
108 } else {
109 __flush_purge_virtual_region(to, 96 __flush_purge_virtual_region(to,
110 (void *)(address & 0xfffff000), 97 (void *)(address & 0xfffff000),
111 PAGE_SIZE); 98 PAGE_SIZE);
112 copy_page(to, from);
113 __flush_wback_region(to, PAGE_SIZE);
114 }
115}
116 99
117/* 100 copy_page(to, from);
118 * For SH7705, we have our own implementation for ptep_get_and_clear 101 __flush_wback_region(to, PAGE_SIZE);
119 * Copied from pg-sh4.c
120 */
121pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
122{
123 pte_t pte = *ptep;
124
125 pte_clear(mm, addr, ptep);
126 if (!pte_not_present(pte)) {
127 unsigned long pfn = pte_pfn(pte);
128 if (pfn_valid(pfn)) {
129 struct page *page = pfn_to_page(pfn);
130 struct address_space *mapping = page_mapping(page);
131 if (!mapping || !mapping_writably_mapped(mapping))
132 __clear_bit(PG_mapped, &page->flags);
133 }
134 }
135
136 return pte;
137} 102}
138