path: root/arch/mn10300/mm/cache.c
author		David Howells <dhowells@redhat.com>	2010-10-27 12:28:46 -0400
committer	David Howells <dhowells@redhat.com>	2010-10-27 12:28:46 -0400
commit		b478491f2628114b2eae76587f22ce3789b66012 (patch)
tree		038580a05fa1a3c8c9cbee0fa8743af2bba650b9 /arch/mn10300/mm/cache.c
parent		9731d23710736b96786d68c2e63148ff3f22e6eb (diff)
MN10300: Allow some cacheflushes to be avoided if cache snooping is available
The AM34 core is able to do cache snooping, and so can skip some of the cache flushing.

Signed-off-by: David Howells <dhowells@redhat.com>
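As a rough sketch of the idea behind this change (not the actual patch body, which is in the diff below): when instruction fetches snoop the data cache, the dcache write-back that normally has to precede an icache invalidation can be compiled out. The CONFIG_MN10300_CACHE_SNOOP symbol used here is only an assumed placeholder for whatever configuration option the patch keys this behaviour on.

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
#ifndef CONFIG_MN10300_CACHE_SNOOP	/* assumed placeholder config symbol */
	/* no snooping: write the freshly stored instructions back from the
	 * dcache so that the icache refill sees them in memory */
	mn10300_dcache_flush_page(page_to_phys(page));
#endif
	/* stale icache lines covering this page must still be invalidated */
	mn10300_icache_inv();
}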
Diffstat (limited to 'arch/mn10300/mm/cache.c')
-rw-r--r--	arch/mn10300/mm/cache.c		90
1 file changed, 0 insertions(+), 90 deletions(-)
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index 9261217e8d2c..bc35826f1357 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -37,96 +37,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_page);
 #endif
 
 /*
- * write a page back from the dcache and invalidate the icache so that we can
- * run code from it that we've just written into it
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	mn10300_dcache_flush_page(page_to_phys(page));
-	mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_page);
-
-/*
- * write some code we've just written back from the dcache and invalidate the
- * icache so that we can run that code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_MN10300_CACHE_WBACK
-	unsigned long addr, size, base, off;
-	struct page *page;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ppte, pte;
-
-	if (end > 0x80000000UL) {
-		/* addresses above 0xa0000000 do not go through the cache */
-		if (end > 0xa0000000UL) {
-			end = 0xa0000000UL;
-			if (start >= end)
-				return;
-		}
-
-		/* kernel addresses between 0x80000000 and 0x9fffffff do not
-		 * require page tables, so we just map such addresses directly */
-		base = (start >= 0x80000000UL) ? start : 0x80000000UL;
-		mn10300_dcache_flush_range(base, end);
-		if (base == start)
-			goto invalidate;
-		end = base;
-	}
-
-	for (; start < end; start += size) {
-		/* work out how much of the page to flush */
-		off = start & (PAGE_SIZE - 1);
-
-		size = end - start;
-		if (size > PAGE_SIZE - off)
-			size = PAGE_SIZE - off;
-
-		/* get the physical address the page is mapped to from the page
-		 * tables */
-		pgd = pgd_offset(current->mm, start);
-		if (!pgd || !pgd_val(*pgd))
-			continue;
-
-		pud = pud_offset(pgd, start);
-		if (!pud || !pud_val(*pud))
-			continue;
-
-		pmd = pmd_offset(pud, start);
-		if (!pmd || !pmd_val(*pmd))
-			continue;
-
-		ppte = pte_offset_map(pmd, start);
-		if (!ppte)
-			continue;
-		pte = *ppte;
-		pte_unmap(ppte);
-
-		if (pte_none(pte))
-			continue;
-
-		page = pte_page(pte);
-		if (!page)
-			continue;
-
-		addr = page_to_phys(page);
-
-		/* flush the dcache and invalidate the icache coverage on that
-		 * region */
-		mn10300_dcache_flush_range2(addr + off, size);
-	}
-#endif
-
-invalidate:
-	mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-/*
  * allow userspace to flush the instruction cache
  */
 asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)