Diffstat (limited to 'arch/sh/mm/cache-sh7705.c')
 -rw-r--r--  arch/sh/mm/cache-sh7705.c  | 206
 1 file changed, 206 insertions, 0 deletions
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
new file mode 100644
index 000000000000..ad8ed7d41e16
--- /dev/null
+++ b/arch/sh/mm/cache-sh7705.c
@@ -0,0 +1,206 @@
/*
 * arch/sh/mm/cache-sh7705.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2004  Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The 32KB cache on the SH7705 suffers from the same synonym problem
 * as SH4 CPUs.
 */

#define __pte_offset(address) \
		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
		__pte_offset(address))
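/*
 * Illustrative sketch (not part of the original file): assuming 4KB pages
 * (PAGE_SHIFT == 12) and PTRS_PER_PTE == 1024, __pte_offset() extracts
 * virtual-address bits [21:12], the index of the PTE within its page
 * table. The value below is hypothetical:
 *
 *	__pte_offset(0x0040b123) == (0x0040b123 >> 12) & 1023 == 0xb
 */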
static inline void cache_wback_all(void)
{
	unsigned long ways, waysize, addrstart;

	ways = cpu_data->dcache.ways;
	waysize = cpu_data->dcache.sets;
	waysize <<= cpu_data->dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += cpu_data->dcache.linesz) {
			unsigned long data;
			int v = SH_CACHE_UPDATED | SH_CACHE_VALID;

			data = ctrl_inl(addr);

			if ((data & v) == v)
				ctrl_outl(data & ~v, addr);
		}

		addrstart += cpu_data->dcache.way_incr;
	} while (--ways);
}
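/*
 * Illustrative sketch (not part of the original file): the loop above
 * walks ways * (sets << entry_shift) bytes of the memory-mapped
 * operand-cache address array. Assuming the SH7705's 32KB, 4-way,
 * 16-byte-line data cache, the geometry works out as:
 *
 *	waysize = 512 sets << 4 = 8KB per way
 *	total   = 4 ways * 8KB  = 32KB
 *
 * Each 8KB way spans two 4KB pages, which is why virtual aliasing
 * (the synonym problem) can occur at all.
 */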

/*
 * Write back the given range of the D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_wback_region((void *)start, end - start);
}

/*
 * Write back & invalidate the D-cache of the page.
 */
static void __flush_dcache_page(unsigned long phys)
{
	unsigned long ways, waysize, addrstart;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However this function exists to deal with
	 * potential cache aliasing, therefore the optimisation is probably not
	 * possible.
	 */
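	/*
	 * Illustrative restatement (not part of the original file) of the
	 * compare performed in the loop below; TAG_MASK here is a
	 * hypothetical name for the 0x1ffffC00 literal:
	 *
	 *	masked = ctrl_inl(addr) & (TAG_MASK | SH_CACHE_VALID);
	 *	hit    = (masked == (page_frame | SH_CACHE_VALID));
	 *
	 * Folding SH_CACHE_VALID into 'phys' above lets one compare test
	 * both "tag matches this page frame" and "entry is valid".
	 */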
	local_irq_save(flags);
	jump_to_P2();

	ways = cpu_data->dcache.ways;
	waysize = cpu_data->dcache.sets;
	waysize <<= cpu_data->dcache.entry_shift;

	addrstart = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long addr;

		for (addr = addrstart;
		     addr < addrstart + waysize;
		     addr += cpu_data->dcache.linesz) {
			unsigned long data;

			data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
			if (data == phys) {
				data &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				ctrl_outl(data, addr);
			}
		}

		addrstart += cpu_data->dcache.way_incr;
	} while (--ways);

	back_to_P1();
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags))
		__flush_dcache_page(PHYSADDR(page_address(page)));
}
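/*
 * Illustrative note (not part of the original file): page_address()
 * returns the page's cached kernel (P1) address for lowmem pages, and
 * PHYSADDR() strips the segment bits to recover the physical address;
 * the result is assumed equivalent to:
 *
 *	page_to_pfn(page) << PAGE_SHIFT
 */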

void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_P2();

	cache_wback_all();
	back_to_P1();
	local_irq_restore(flags);
}
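/*
 * Note (not part of the original file): writes to the cache address
 * array are assumed to require execution from the uncached P2 segment,
 * hence the jump_to_P2()/back_to_P1() bracket, with interrupts disabled
 * for the duration.
 */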

void flush_cache_mm(struct mm_struct *mm)
{
	/* Is there any good way? */
	/* XXX: possibly call flush_cache_range for each vm area */
	flush_cache_all();
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * We could call flush_cache_page for the pages of this range,
	 * but it's not efficient (it scans the whole cache every time...).
	 *
	 * We can't use the A-bit magic, as there are cases where we have
	 * no valid entry in the TLB.
	 */
	flush_cache_all();
}
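/*
 * The per-page alternative rejected above, sketched for illustration
 * only (pfn_of() is a hypothetical lookup, not a real helper):
 *
 *	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE)
 *		flush_cache_page(vma, addr, pfn_of(vma, addr));
 *
 * Each flush_cache_page() call rescans the whole address array, so a
 * single flush_cache_all() is cheaper for any non-trivial range.
 */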

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	__flush_dcache_page(pfn << PAGE_SHIFT);
}
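/*
 * Illustrative arithmetic (not part of the original file), assuming
 * PAGE_SHIFT == 12: the shift converts a page frame number into the
 * physical byte address of the page, e.g. pfn 0x0c412 -> 0x0c412000.
 */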

/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space. It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with a 32K cache, but
 * without it we get occasional "Memory fault" errors when loading a
 * program.
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	__flush_purge_region(page_address(page), PAGE_SIZE);
}